Swarm Learning¶

Case Reproduction¶

InĀ [76]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from glob import glob
%matplotlib inline
import matplotlib.pyplot as plt
InĀ [77]:
all_xray_df = pd.read_csv("/Users/collindougherty/.cache/kagglehub/datasets/nih-chest-xrays/data/versions/3/Data_Entry_2017.csv")

path = "/Users/collindougherty/.cache/kagglehub/datasets/nih-chest-xrays/data/versions/3"

all_image_paths = {os.path.basename(x): x for x in glob(os.path.join(path, 'images_*', 'images', '*.png'))}

print('Scans found:', len(all_image_paths), ', Total Headers', all_xray_df.shape[0])
all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get)

def process_age(age):
    if isinstance(age, str):
        # If it's a string, remove the last character and convert to int
        return int(age[:-1])
    elif isinstance(age, (int, float)):
        # If it's already a number, return it as is
        return int(age)
    else:
        # For any other type, return None or a default value
        return None

all_xray_df['Patient Age'] = all_xray_df['Patient Age'].map(process_age)

all_xray_df.sample(10)
Scans found: 112120 , Total Headers 112120
Out[77]:
Image Index Finding Labels Follow-up # Patient ID Patient Age Patient Gender View Position OriginalImage[Width Height] OriginalImagePixelSpacing[x y] Unnamed: 11 path
65113 00016074_013.png No Finding 13 16074 44 M AP 2500 2048 0.168 0.168 NaN /Users/collindougherty/.cache/kagglehub/datase...
69283 00017105_001.png No Finding 1 17105 49 F PA 2544 3056 0.139 0.139 NaN /Users/collindougherty/.cache/kagglehub/datase...
31655 00008275_005.png No Finding 5 8275 59 F PA 2992 2991 0.143 0.143 NaN /Users/collindougherty/.cache/kagglehub/datase...
3338 00000877_032.png No Finding 32 877 39 M AP 2500 2048 0.168 0.168 NaN /Users/collindougherty/.cache/kagglehub/datase...
79093 00019409_000.png Effusion|Pleural_Thickening 0 19409 55 F PA 2438 2991 0.143 0.143 NaN /Users/collindougherty/.cache/kagglehub/datase...
35873 00009453_005.png No Finding 5 9453 14 F PA 2048 2500 0.168 0.168 NaN /Users/collindougherty/.cache/kagglehub/datase...
20132 00005377_000.png Consolidation|Nodule 0 5377 53 F PA 2048 2500 0.171 0.171 NaN /Users/collindougherty/.cache/kagglehub/datase...
78858 00019363_022.png Infiltration 22 19363 55 F AP 2500 2048 0.168 0.168 NaN /Users/collindougherty/.cache/kagglehub/datase...
40396 00010524_003.png Cardiomegaly|Edema|Infiltration 3 10524 62 F PA 2610 2541 0.143 0.143 NaN /Users/collindougherty/.cache/kagglehub/datase...
30129 00007857_018.png No Finding 18 7857 53 F AP 2500 2048 0.168 0.168 NaN /Users/collindougherty/.cache/kagglehub/datase...
InĀ [78]:
# Specify the labels to keep as used in the Swarm Learning paper
labels_to_keep = ['Atelectasis', 'Effusion', 'Infiltration', 'No Finding']

# let's filter all_xray_df to only include the rows with the labels we want to keep
all_xray_df = all_xray_df[all_xray_df['Finding Labels'].isin(labels_to_keep)]

#all_xray_df['Finding Labels'] = all_xray_df['Finding Labels'].map(lambda x: x.replace('No Finding', ''))
from itertools import chain
all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist())))
all_labels = [x for x in all_labels if len(x)>0]
print('All Labels ({}): {}'.format(len(all_labels), all_labels))
for c_label in all_labels:
    if len(c_label)>1: # leave out empty labels
        all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0)
all_xray_df.sample(3)

print('Selected Labels ({})'.format(len(all_labels)))
for label in labels_to_keep:
    count = int(all_xray_df[label].sum())
    print(f"{label}: {count} cases")
All Labels (4): ['Atelectasis', 'Effusion', 'Infiltration', 'No Finding']
Selected Labels (4)
Atelectasis: 4215 cases
Effusion: 3955 cases
Infiltration: 9547 cases
No Finding: 60361 cases
InĀ [79]:
def get_balanced_subset(df, labels, no_finding_ratio=0.33):
    other_labels = [label for label in labels if label != 'No Finding']
    
    # Calculate the number of samples for each category
    other_count = min(df[label].sum() for label in other_labels)
    other_count = int(other_count)  # Ensure it's an integer
    
    total_other_samples = other_count * len(other_labels)
    no_finding_count = int(total_other_samples * no_finding_ratio / (1 - no_finding_ratio))
    
    # Sample 'No Finding' cases
    no_finding_df = df[df['No Finding'] == 1].sample(no_finding_count, replace=False, random_state=42)
    
    # Sample other categories
    other_dfs = []
    for label in other_labels:
        label_df = df[df[label] == 1].sample(other_count, replace=False, random_state=42)
        other_dfs.append(label_df)
    
    # Combine all sampled dataframes
    balanced_df = pd.concat([no_finding_df] + other_dfs, axis=0)
    return balanced_df.sample(frac=1, random_state=42).reset_index(drop=True)  # Shuffle the dataframe

# Get the balanced subset
balanced_df = get_balanced_subset(all_xray_df, all_labels)

# Verify the distribution
label_counts = balanced_df[all_labels].sum()
total_samples = len(balanced_df)

print("Label distribution:")
for label in all_labels:
    count = label_counts[label]
    percentage = (count / total_samples) * 100
    print(f"{label}: {count} ({percentage:.2f}%)")

print(f"\nTotal samples: {total_samples}")

# Check for multi-label instances
multi_label = balanced_df[all_labels].sum(axis=1) > 1
print(f"\nSamples with multiple labels: {multi_label.sum()} ({multi_label.sum() / len(balanced_df) * 100:.2f}%)")

all_xray_df = balanced_df
Label distribution:
Atelectasis: 3955.0 (22.33%)
Effusion: 3955.0 (22.33%)
Infiltration: 3955.0 (22.33%)
No Finding: 5843.0 (33.00%)

Total samples: 17708

Samples with multiple labels: 0 (0.00%)
InĀ [80]:
all_xray_df['disease_vec'] = all_xray_df.apply(lambda x: [x[all_labels].values], 1).map(lambda x: x[0])

from sklearn.model_selection import train_test_split
train_df, valid_df = train_test_split(all_xray_df, 
                                   test_size = 0.2, 
                                   random_state = 42,
                                   stratify = all_xray_df['Finding Labels'].map(lambda x: x[:4]))
print('train', train_df.shape[0], 'validation', valid_df.shape[0])
train 14166 validation 3542
InĀ [Ā ]:
# let's split train_df into 3 nodes randomly, each with specified probability
node1_df = train_df.sample(frac=0.3, random_state=42)
node2_df = train_df.drop(node1_df.index).sample(frac=0.32, random_state=42)
node3_df = train_df.drop(node1_df.index).drop(node2_df.index)
InĀ [82]:
from keras.preprocessing.image import ImageDataGenerator
IMG_SIZE = (128, 128)
core_idg = ImageDataGenerator(samplewise_center=True, 
                              samplewise_std_normalization=True, 
                              horizontal_flip = True, 
                              vertical_flip = False, 
                              height_shift_range= 0.05, 
                              width_shift_range=0.1, 
                              rotation_range=5, 
                              shear_range = 0.1,
                              fill_mode = 'reflect',
                              zoom_range=0.15)
InĀ [83]:
valid_df['newLabel'] = valid_df.apply(lambda x: x['Finding Labels'].split('|'), axis=1)

node1_df['newLabel'] = node1_df.apply(lambda x: x['Finding Labels'].split('|'), axis=1)
node2_df['newLabel'] = node2_df.apply(lambda x: x['Finding Labels'].split('|'), axis=1)
node3_df['newLabel'] = node3_df.apply(lambda x: x['Finding Labels'].split('|'), axis=1)

node1_gen = core_idg.flow_from_dataframe(
    dataframe=node1_df,
    directory=None,
    x_col='path',
    y_col='newLabel',
    class_mode='categorical',
    classes=all_labels,
    target_size=IMG_SIZE,
    color_mode='grayscale',
    batch_size=32
)

node2_gen = core_idg.flow_from_dataframe(
    dataframe=node2_df,
    directory=None,
    x_col='path',
    y_col='newLabel',
    class_mode='categorical',
    classes=all_labels,
    target_size=IMG_SIZE,
    color_mode='grayscale',
    batch_size=32
)

node3_gen = core_idg.flow_from_dataframe(
    dataframe=node3_df,
    directory=None,
    x_col='path',
    y_col='newLabel',
    class_mode='categorical',
    classes=all_labels,
    target_size=IMG_SIZE,
    color_mode='grayscale',
    batch_size=32
)

valid_gen = core_idg.flow_from_dataframe(
    dataframe=valid_df,
    directory=None,
    x_col='path',
    y_col='newLabel',
    class_mode='categorical',
    classes=all_labels,
    target_size=IMG_SIZE,
    color_mode='grayscale',
    batch_size=256
)

# For test data
test_X, test_Y = next(core_idg.flow_from_dataframe(
    dataframe=valid_df,
    directory=None,
    x_col='path',
    y_col='newLabel',
    class_mode='categorical',
    classes=all_labels,
    target_size=IMG_SIZE,
    color_mode='grayscale',
    batch_size=1024
))
Found 4250 validated image filenames belonging to 4 classes.
Found 1983 validated image filenames belonging to 4 classes.
Found 7933 validated image filenames belonging to 4 classes.
Found 3542 validated image filenames belonging to 4 classes.
Found 3542 validated image filenames belonging to 4 classes.

Node 1¶

InĀ [84]:
t_x, t_y = next(node1_gen)
fig, m_axs = plt.subplots(4, 4, figsize = (16, 16))
for (c_x, c_y, c_ax) in zip(t_x, t_y, m_axs.flatten()):
    c_ax.imshow(c_x[:,:,0], cmap = 'bone', vmin = -1.5, vmax = 1.5)
    c_ax.set_title(', '.join([n_class for n_class, n_score in zip(all_labels, c_y) 
                             if n_score>0.5]))
    c_ax.axis('off')

from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten
from keras.models import Sequential
base_mobilenet_model = MobileNet(input_shape =  t_x.shape[1:], 
                                 include_top = False, weights = None)
node1_model = Sequential()
node1_model.add(base_mobilenet_model)
node1_model.add(GlobalAveragePooling2D())
node1_model.add(Dropout(0.5))
node1_model.add(Dense(512))
node1_model.add(Dropout(0.5))
node1_model.add(Dense(len(all_labels), activation = 'sigmoid'))
node1_model.compile(optimizer = 'adam', loss = 'binary_crossentropy',
                           metrics = ['binary_accuracy', 'mae'])
node1_model.summary()


from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('xray_class')

checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, 
                             save_best_only=True, mode='min', save_weights_only = True)

early = EarlyStopping(monitor="val_loss", 
                      mode="min", 
                      patience=10)
callbacks_list = [checkpoint, early]

node1_model.fit_generator(node1_gen, 
                                  steps_per_epoch=100,
                                  validation_data = (test_X, test_Y), 
                                  epochs = 100, 
                                  callbacks = callbacks_list)

pred_Y1 = node1_model.predict(test_X, batch_size = 32, verbose = True)

from sklearn.metrics import roc_curve, auc
fig, c_ax = plt.subplots(1,1, figsize = (9, 9))
for (idx, c_label) in enumerate(all_labels):
    fpr, tpr, thresholds = roc_curve(test_Y[:,idx].astype(int), pred_Y1[:,idx])
    c_ax.plot(fpr, tpr, label = '%s (AUC:%0.2f)'  % (c_label, auc(fpr, tpr)))
c_ax.legend()
c_ax.set_xlabel('False Positive Rate')
c_ax.set_ylabel('True Positive Rate')
fig.savefig('barely_trained_net.png')
Model: "sequential_8"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 mobilenet_1.00_128 (Functi  (None, 4, 4, 1024)        3228288   
 onal)                                                           
                                                                 
 global_average_pooling2d_8  (None, 1024)              0         
  (GlobalAveragePooling2D)                                       
                                                                 
 dropout_16 (Dropout)        (None, 1024)              0         
                                                                 
 dense_16 (Dense)            (None, 512)               524800    
                                                                 
 dropout_17 (Dropout)        (None, 512)               0         
                                                                 
 dense_17 (Dense)            (None, 4)                 2052      
                                                                 
=================================================================
Total params: 3755140 (14.32 MB)
Trainable params: 3733252 (14.24 MB)
Non-trainable params: 21888 (85.50 KB)
_________________________________________________________________
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/1741217282.py:37: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  node1_model.fit_generator(node1_gen,
Epoch 1/100
100/100 [==============================] - ETA: 0s - loss: 0.7833 - binary_accuracy: 0.6763 - mae: 0.3661
Epoch 1: val_loss improved from inf to 0.60762, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 99s 979ms/step - loss: 0.7833 - binary_accuracy: 0.6763 - mae: 0.3661 - val_loss: 0.6076 - val_binary_accuracy: 0.6646 - val_mae: 0.4288
Epoch 2/100
100/100 [==============================] - ETA: 0s - loss: 0.6877 - binary_accuracy: 0.6906 - mae: 0.3650
Epoch 2: val_loss did not improve from 0.60762
100/100 [==============================] - 109s 1s/step - loss: 0.6877 - binary_accuracy: 0.6906 - mae: 0.3650 - val_loss: 0.6270 - val_binary_accuracy: 0.6021 - val_mae: 0.4017
Epoch 3/100
100/100 [==============================] - ETA: 0s - loss: 0.6284 - binary_accuracy: 0.7101 - mae: 0.3579
Epoch 3: val_loss improved from 0.60762 to 0.56736, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 109s 1s/step - loss: 0.6284 - binary_accuracy: 0.7101 - mae: 0.3579 - val_loss: 0.5674 - val_binary_accuracy: 0.7500 - val_mae: 0.3677
Epoch 4/100
100/100 [==============================] - ETA: 0s - loss: 0.5890 - binary_accuracy: 0.7267 - mae: 0.3588
Epoch 4: val_loss did not improve from 0.56736
100/100 [==============================] - 117s 1s/step - loss: 0.5890 - binary_accuracy: 0.7267 - mae: 0.3588 - val_loss: 0.5716 - val_binary_accuracy: 0.7500 - val_mae: 0.3677
Epoch 5/100
100/100 [==============================] - ETA: 0s - loss: 0.5688 - binary_accuracy: 0.7342 - mae: 0.3556
Epoch 5: val_loss did not improve from 0.56736
100/100 [==============================] - 112s 1s/step - loss: 0.5688 - binary_accuracy: 0.7342 - mae: 0.3556 - val_loss: 0.6232 - val_binary_accuracy: 0.6646 - val_mae: 0.3500
Epoch 6/100
100/100 [==============================] - ETA: 0s - loss: 0.5606 - binary_accuracy: 0.7420 - mae: 0.3558
Epoch 6: val_loss improved from 0.56736 to 0.56584, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 124s 1s/step - loss: 0.5606 - binary_accuracy: 0.7420 - mae: 0.3558 - val_loss: 0.5658 - val_binary_accuracy: 0.7500 - val_mae: 0.3593
Epoch 7/100
100/100 [==============================] - ETA: 0s - loss: 0.5460 - binary_accuracy: 0.7456 - mae: 0.3532
Epoch 7: val_loss did not improve from 0.56584
100/100 [==============================] - 110s 1s/step - loss: 0.5460 - binary_accuracy: 0.7456 - mae: 0.3532 - val_loss: 0.5697 - val_binary_accuracy: 0.7495 - val_mae: 0.3575
Epoch 8/100
100/100 [==============================] - ETA: 0s - loss: 0.5449 - binary_accuracy: 0.7469 - mae: 0.3540
Epoch 8: val_loss improved from 0.56584 to 0.53211, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 112s 1s/step - loss: 0.5449 - binary_accuracy: 0.7469 - mae: 0.3540 - val_loss: 0.5321 - val_binary_accuracy: 0.7520 - val_mae: 0.3485
Epoch 9/100
100/100 [==============================] - ETA: 0s - loss: 0.5353 - binary_accuracy: 0.7498 - mae: 0.3475
Epoch 9: val_loss did not improve from 0.53211
100/100 [==============================] - 106s 1s/step - loss: 0.5353 - binary_accuracy: 0.7498 - mae: 0.3475 - val_loss: 0.5366 - val_binary_accuracy: 0.7493 - val_mae: 0.3439
Epoch 10/100
100/100 [==============================] - ETA: 0s - loss: 0.5344 - binary_accuracy: 0.7521 - mae: 0.3467
Epoch 10: val_loss did not improve from 0.53211
100/100 [==============================] - 130s 1s/step - loss: 0.5344 - binary_accuracy: 0.7521 - mae: 0.3467 - val_loss: 0.5421 - val_binary_accuracy: 0.7458 - val_mae: 0.3347
Epoch 11/100
100/100 [==============================] - ETA: 0s - loss: 0.5353 - binary_accuracy: 0.7484 - mae: 0.3494
Epoch 11: val_loss improved from 0.53211 to 0.52626, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 113s 1s/step - loss: 0.5353 - binary_accuracy: 0.7484 - mae: 0.3494 - val_loss: 0.5263 - val_binary_accuracy: 0.7544 - val_mae: 0.3481
Epoch 12/100
100/100 [==============================] - ETA: 0s - loss: 0.5304 - binary_accuracy: 0.7517 - mae: 0.3456
Epoch 12: val_loss did not improve from 0.52626
100/100 [==============================] - 103s 1s/step - loss: 0.5304 - binary_accuracy: 0.7517 - mae: 0.3456 - val_loss: 0.5303 - val_binary_accuracy: 0.7495 - val_mae: 0.3508
Epoch 13/100
100/100 [==============================] - ETA: 0s - loss: 0.5269 - binary_accuracy: 0.7520 - mae: 0.3443
Epoch 13: val_loss did not improve from 0.52626
100/100 [==============================] - 107s 1s/step - loss: 0.5269 - binary_accuracy: 0.7520 - mae: 0.3443 - val_loss: 0.5463 - val_binary_accuracy: 0.7476 - val_mae: 0.3533
Epoch 14/100
100/100 [==============================] - ETA: 0s - loss: 0.5284 - binary_accuracy: 0.7520 - mae: 0.3449
Epoch 14: val_loss did not improve from 0.52626
100/100 [==============================] - 116s 1s/step - loss: 0.5284 - binary_accuracy: 0.7520 - mae: 0.3449 - val_loss: 0.5265 - val_binary_accuracy: 0.7549 - val_mae: 0.3465
Epoch 15/100
100/100 [==============================] - ETA: 0s - loss: 0.5287 - binary_accuracy: 0.7516 - mae: 0.3454
Epoch 15: val_loss improved from 0.52626 to 0.52498, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 111s 1s/step - loss: 0.5287 - binary_accuracy: 0.7516 - mae: 0.3454 - val_loss: 0.5250 - val_binary_accuracy: 0.7529 - val_mae: 0.3452
Epoch 16/100
100/100 [==============================] - ETA: 0s - loss: 0.5223 - binary_accuracy: 0.7536 - mae: 0.3410
Epoch 16: val_loss improved from 0.52498 to 0.52299, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 114s 1s/step - loss: 0.5223 - binary_accuracy: 0.7536 - mae: 0.3410 - val_loss: 0.5230 - val_binary_accuracy: 0.7551 - val_mae: 0.3349
Epoch 17/100
100/100 [==============================] - ETA: 0s - loss: 0.5223 - binary_accuracy: 0.7548 - mae: 0.3406
Epoch 17: val_loss improved from 0.52299 to 0.51960, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 107s 1s/step - loss: 0.5223 - binary_accuracy: 0.7548 - mae: 0.3406 - val_loss: 0.5196 - val_binary_accuracy: 0.7568 - val_mae: 0.3373
Epoch 18/100
100/100 [==============================] - ETA: 0s - loss: 0.5223 - binary_accuracy: 0.7536 - mae: 0.3415
Epoch 18: val_loss did not improve from 0.51960
100/100 [==============================] - 109s 1s/step - loss: 0.5223 - binary_accuracy: 0.7536 - mae: 0.3415 - val_loss: 0.5321 - val_binary_accuracy: 0.7517 - val_mae: 0.3518
Epoch 19/100
100/100 [==============================] - ETA: 0s - loss: 0.5228 - binary_accuracy: 0.7547 - mae: 0.3404
Epoch 19: val_loss did not improve from 0.51960
100/100 [==============================] - 88s 863ms/step - loss: 0.5228 - binary_accuracy: 0.7547 - mae: 0.3404 - val_loss: 0.5323 - val_binary_accuracy: 0.7532 - val_mae: 0.3482
Epoch 20/100
100/100 [==============================] - ETA: 0s - loss: 0.5231 - binary_accuracy: 0.7519 - mae: 0.3405
Epoch 20: val_loss did not improve from 0.51960
100/100 [==============================] - 38s 374ms/step - loss: 0.5231 - binary_accuracy: 0.7519 - mae: 0.3405 - val_loss: 0.5404 - val_binary_accuracy: 0.7349 - val_mae: 0.3533
Epoch 21/100
100/100 [==============================] - ETA: 0s - loss: 0.5208 - binary_accuracy: 0.7537 - mae: 0.3390
Epoch 21: val_loss did not improve from 0.51960
100/100 [==============================] - 39s 388ms/step - loss: 0.5208 - binary_accuracy: 0.7537 - mae: 0.3390 - val_loss: 0.5247 - val_binary_accuracy: 0.7566 - val_mae: 0.3353
Epoch 22/100
100/100 [==============================] - ETA: 0s - loss: 0.5161 - binary_accuracy: 0.7595 - mae: 0.3371
Epoch 22: val_loss did not improve from 0.51960
100/100 [==============================] - 40s 394ms/step - loss: 0.5161 - binary_accuracy: 0.7595 - mae: 0.3371 - val_loss: 0.5446 - val_binary_accuracy: 0.7378 - val_mae: 0.3372
Epoch 23/100
100/100 [==============================] - ETA: 0s - loss: 0.5136 - binary_accuracy: 0.7603 - mae: 0.3347
Epoch 23: val_loss did not improve from 0.51960
100/100 [==============================] - 39s 384ms/step - loss: 0.5136 - binary_accuracy: 0.7603 - mae: 0.3347 - val_loss: 0.5196 - val_binary_accuracy: 0.7603 - val_mae: 0.3395
Epoch 24/100
100/100 [==============================] - ETA: 0s - loss: 0.5098 - binary_accuracy: 0.7601 - mae: 0.3330
Epoch 24: val_loss improved from 0.51960 to 0.51526, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 39s 387ms/step - loss: 0.5098 - binary_accuracy: 0.7601 - mae: 0.3330 - val_loss: 0.5153 - val_binary_accuracy: 0.7507 - val_mae: 0.3355
Epoch 25/100
100/100 [==============================] - ETA: 0s - loss: 0.5163 - binary_accuracy: 0.7538 - mae: 0.3342
Epoch 25: val_loss did not improve from 0.51526
100/100 [==============================] - 44s 438ms/step - loss: 0.5163 - binary_accuracy: 0.7538 - mae: 0.3342 - val_loss: 0.5191 - val_binary_accuracy: 0.7554 - val_mae: 0.3267
Epoch 26/100
100/100 [==============================] - ETA: 0s - loss: 0.5112 - binary_accuracy: 0.7583 - mae: 0.3322
Epoch 26: val_loss did not improve from 0.51526
100/100 [==============================] - 49s 490ms/step - loss: 0.5112 - binary_accuracy: 0.7583 - mae: 0.3322 - val_loss: 0.5195 - val_binary_accuracy: 0.7566 - val_mae: 0.3367
Epoch 27/100
100/100 [==============================] - ETA: 0s - loss: 0.5066 - binary_accuracy: 0.7623 - mae: 0.3303
Epoch 27: val_loss did not improve from 0.51526
100/100 [==============================] - 48s 476ms/step - loss: 0.5066 - binary_accuracy: 0.7623 - mae: 0.3303 - val_loss: 0.5446 - val_binary_accuracy: 0.7405 - val_mae: 0.3427
Epoch 28/100
100/100 [==============================] - ETA: 0s - loss: 0.5126 - binary_accuracy: 0.7544 - mae: 0.3338
Epoch 28: val_loss did not improve from 0.51526
100/100 [==============================] - 46s 459ms/step - loss: 0.5126 - binary_accuracy: 0.7544 - mae: 0.3338 - val_loss: 0.5167 - val_binary_accuracy: 0.7620 - val_mae: 0.3371
Epoch 29/100
100/100 [==============================] - ETA: 0s - loss: 0.5069 - binary_accuracy: 0.7633 - mae: 0.3303
Epoch 29: val_loss did not improve from 0.51526
100/100 [==============================] - 48s 476ms/step - loss: 0.5069 - binary_accuracy: 0.7633 - mae: 0.3303 - val_loss: 0.5437 - val_binary_accuracy: 0.7502 - val_mae: 0.3333
Epoch 30/100
100/100 [==============================] - ETA: 0s - loss: 0.5115 - binary_accuracy: 0.7591 - mae: 0.3323
Epoch 30: val_loss did not improve from 0.51526
100/100 [==============================] - 45s 448ms/step - loss: 0.5115 - binary_accuracy: 0.7591 - mae: 0.3323 - val_loss: 0.5225 - val_binary_accuracy: 0.7466 - val_mae: 0.3363
Epoch 31/100
100/100 [==============================] - ETA: 0s - loss: 0.5020 - binary_accuracy: 0.7630 - mae: 0.3254
Epoch 31: val_loss did not improve from 0.51526
100/100 [==============================] - 46s 453ms/step - loss: 0.5020 - binary_accuracy: 0.7630 - mae: 0.3254 - val_loss: 0.5183 - val_binary_accuracy: 0.7566 - val_mae: 0.3442
Epoch 32/100
100/100 [==============================] - ETA: 0s - loss: 0.5101 - binary_accuracy: 0.7613 - mae: 0.3317
Epoch 32: val_loss improved from 0.51526 to 0.51046, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 46s 459ms/step - loss: 0.5101 - binary_accuracy: 0.7613 - mae: 0.3317 - val_loss: 0.5105 - val_binary_accuracy: 0.7605 - val_mae: 0.3226
Epoch 33/100
100/100 [==============================] - ETA: 0s - loss: 0.5051 - binary_accuracy: 0.7609 - mae: 0.3284
Epoch 33: val_loss did not improve from 0.51046
100/100 [==============================] - 47s 470ms/step - loss: 0.5051 - binary_accuracy: 0.7609 - mae: 0.3284 - val_loss: 0.5159 - val_binary_accuracy: 0.7590 - val_mae: 0.3340
Epoch 34/100
100/100 [==============================] - ETA: 0s - loss: 0.5002 - binary_accuracy: 0.7614 - mae: 0.3243
Epoch 34: val_loss did not improve from 0.51046
100/100 [==============================] - 46s 457ms/step - loss: 0.5002 - binary_accuracy: 0.7614 - mae: 0.3243 - val_loss: 0.5163 - val_binary_accuracy: 0.7520 - val_mae: 0.3330
Epoch 35/100
100/100 [==============================] - ETA: 0s - loss: 0.5034 - binary_accuracy: 0.7599 - mae: 0.3275
Epoch 35: val_loss did not improve from 0.51046
100/100 [==============================] - 43s 429ms/step - loss: 0.5034 - binary_accuracy: 0.7599 - mae: 0.3275 - val_loss: 0.5214 - val_binary_accuracy: 0.7537 - val_mae: 0.3201
Epoch 36/100
100/100 [==============================] - ETA: 0s - loss: 0.5030 - binary_accuracy: 0.7622 - mae: 0.3263
Epoch 36: val_loss did not improve from 0.51046
100/100 [==============================] - 41s 408ms/step - loss: 0.5030 - binary_accuracy: 0.7622 - mae: 0.3263 - val_loss: 0.5595 - val_binary_accuracy: 0.7358 - val_mae: 0.3245
Epoch 37/100
100/100 [==============================] - ETA: 0s - loss: 0.4987 - binary_accuracy: 0.7645 - mae: 0.3234
Epoch 37: val_loss did not improve from 0.51046
100/100 [==============================] - 41s 403ms/step - loss: 0.4987 - binary_accuracy: 0.7645 - mae: 0.3234 - val_loss: 0.5630 - val_binary_accuracy: 0.7434 - val_mae: 0.3383
Epoch 38/100
100/100 [==============================] - ETA: 0s - loss: 0.5025 - binary_accuracy: 0.7616 - mae: 0.3258
Epoch 38: val_loss did not improve from 0.51046
100/100 [==============================] - 47s 468ms/step - loss: 0.5025 - binary_accuracy: 0.7616 - mae: 0.3258 - val_loss: 0.5108 - val_binary_accuracy: 0.7563 - val_mae: 0.3424
Epoch 39/100
100/100 [==============================] - ETA: 0s - loss: 0.5002 - binary_accuracy: 0.7636 - mae: 0.3255
Epoch 39: val_loss did not improve from 0.51046
100/100 [==============================] - 64s 640ms/step - loss: 0.5002 - binary_accuracy: 0.7636 - mae: 0.3255 - val_loss: 0.5366 - val_binary_accuracy: 0.7505 - val_mae: 0.3282
Epoch 40/100
100/100 [==============================] - ETA: 0s - loss: 0.4908 - binary_accuracy: 0.7660 - mae: 0.3177
Epoch 40: val_loss did not improve from 0.51046
100/100 [==============================] - 56s 553ms/step - loss: 0.4908 - binary_accuracy: 0.7660 - mae: 0.3177 - val_loss: 0.5341 - val_binary_accuracy: 0.7444 - val_mae: 0.3189
Epoch 41/100
100/100 [==============================] - ETA: 0s - loss: 0.4944 - binary_accuracy: 0.7671 - mae: 0.3211
Epoch 41: val_loss did not improve from 0.51046
100/100 [==============================] - 50s 499ms/step - loss: 0.4944 - binary_accuracy: 0.7671 - mae: 0.3211 - val_loss: 0.5252 - val_binary_accuracy: 0.7573 - val_mae: 0.3331
Epoch 42/100
100/100 [==============================] - ETA: 0s - loss: 0.4923 - binary_accuracy: 0.7652 - mae: 0.3202
Epoch 42: val_loss did not improve from 0.51046
100/100 [==============================] - 51s 508ms/step - loss: 0.4923 - binary_accuracy: 0.7652 - mae: 0.3202 - val_loss: 0.5311 - val_binary_accuracy: 0.7490 - val_mae: 0.3119
32/32 [==============================] - 3s 87ms/step
No description has been provided for this image
No description has been provided for this image

Node 2¶

InĀ [85]:
t_x, t_y = next(node2_gen)
fig, m_axs = plt.subplots(4, 4, figsize = (16, 16))
for (c_x, c_y, c_ax) in zip(t_x, t_y, m_axs.flatten()):
    c_ax.imshow(c_x[:,:,0], cmap = 'bone', vmin = -1.5, vmax = 1.5)
    c_ax.set_title(', '.join([n_class for n_class, n_score in zip(all_labels, c_y) 
                             if n_score>0.5]))
    c_ax.axis('off')

from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten
from keras.models import Sequential
base_mobilenet_model = MobileNet(input_shape =  t_x.shape[1:], 
                                 include_top = False, weights = None)
node2_model = Sequential()
node2_model.add(base_mobilenet_model)
node2_model.add(GlobalAveragePooling2D())
node2_model.add(Dropout(0.5))
node2_model.add(Dense(512))
node2_model.add(Dropout(0.5))
node2_model.add(Dense(len(all_labels), activation = 'sigmoid'))
node2_model.compile(optimizer = 'adam', loss = 'binary_crossentropy',
                           metrics = ['binary_accuracy', 'mae'])
node2_model.summary()


from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('xray_class')

checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, 
                             save_best_only=True, mode='min', save_weights_only = True)

early = EarlyStopping(monitor="val_loss", 
                      mode="min", 
                      patience=10)
callbacks_list = [checkpoint, early]

node2_model.fit_generator(node2_gen, 
                                  steps_per_epoch=100,
                                  validation_data = (test_X, test_Y), 
                                  epochs = 100, 
                                  callbacks = callbacks_list)

pred_Y2 = node2_model.predict(test_X, batch_size = 32, verbose = True)

from sklearn.metrics import roc_curve, auc
fig, c_ax = plt.subplots(1,1, figsize = (9, 9))
for (idx, c_label) in enumerate(all_labels):
    fpr, tpr, thresholds = roc_curve(test_Y[:,idx].astype(int), pred_Y2[:,idx])
    c_ax.plot(fpr, tpr, label = '%s (AUC:%0.2f)'  % (c_label, auc(fpr, tpr)))
c_ax.legend()
c_ax.set_xlabel('False Positive Rate')
c_ax.set_ylabel('True Positive Rate')
fig.savefig('barely_trained_net.png')
Model: "sequential_9"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 mobilenet_1.00_128 (Functi  (None, 4, 4, 1024)        3228288   
 onal)                                                           
                                                                 
 global_average_pooling2d_9  (None, 1024)              0         
  (GlobalAveragePooling2D)                                       
                                                                 
 dropout_18 (Dropout)        (None, 1024)              0         
                                                                 
 dense_18 (Dense)            (None, 512)               524800    
                                                                 
 dropout_19 (Dropout)        (None, 512)               0         
                                                                 
 dense_19 (Dense)            (None, 4)                 2052      
                                                                 
=================================================================
Total params: 3755140 (14.32 MB)
Trainable params: 3733252 (14.24 MB)
Non-trainable params: 21888 (85.50 KB)
_________________________________________________________________
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/2851919898.py:37: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  node2_model.fit_generator(node2_gen,
Epoch 1/100
 62/100 [=================>............] - ETA: 18s - loss: 0.8574 - binary_accuracy: 0.6713 - mae: 0.3713WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 10000 batches). You may need to use the repeat() function when building your dataset.

Epoch 1: val_loss improved from inf to 0.60259, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 34s 331ms/step - loss: 0.8574 - binary_accuracy: 0.6713 - mae: 0.3713 - val_loss: 0.6026 - val_binary_accuracy: 0.7500 - val_mae: 0.4384
32/32 [==============================] - 3s 77ms/step
No description has been provided for this image
No description has been provided for this image

Node 3¶

InĀ [86]:
t_x, t_y = next(node3_gen)
fig, m_axs = plt.subplots(4, 4, figsize = (16, 16))
for (c_x, c_y, c_ax) in zip(t_x, t_y, m_axs.flatten()):
    c_ax.imshow(c_x[:,:,0], cmap = 'bone', vmin = -1.5, vmax = 1.5)
    c_ax.set_title(', '.join([n_class for n_class, n_score in zip(all_labels, c_y) 
                             if n_score>0.5]))
    c_ax.axis('off')

from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten
from keras.models import Sequential
base_mobilenet_model = MobileNet(input_shape =  t_x.shape[1:], 
                                 include_top = False, weights = None)
node3_model = Sequential()
node3_model.add(base_mobilenet_model)
node3_model.add(GlobalAveragePooling2D())
node3_model.add(Dropout(0.5))
node3_model.add(Dense(512))
node3_model.add(Dropout(0.5))
node3_model.add(Dense(len(all_labels), activation = 'sigmoid'))
node3_model.compile(optimizer = 'adam', loss = 'binary_crossentropy',
                           metrics = ['binary_accuracy', 'mae'])
node3_model.summary()


from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('xray_class')

checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, 
                             save_best_only=True, mode='min', save_weights_only = True)

early = EarlyStopping(monitor="val_loss", 
                      mode="min", 
                      patience=10)
callbacks_list = [checkpoint, early]

node3_model.fit_generator(node3_gen, 
                                  steps_per_epoch=100,
                                  validation_data = (test_X, test_Y), 
                                  epochs = 100, 
                                  callbacks = callbacks_list)

pred_Y3 = node3_model.predict(test_X, batch_size = 32, verbose = True)

from sklearn.metrics import roc_curve, auc
fig, c_ax = plt.subplots(1,1, figsize = (9, 9))
for (idx, c_label) in enumerate(all_labels):
    fpr, tpr, thresholds = roc_curve(test_Y[:,idx].astype(int), pred_Y3[:,idx])
    c_ax.plot(fpr, tpr, label = '%s (AUC:%0.2f)'  % (c_label, auc(fpr, tpr)))
c_ax.legend()
c_ax.set_xlabel('False Positive Rate')
c_ax.set_ylabel('True Positive Rate')
fig.savefig('barely_trained_net.png')
Model: "sequential_10"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 mobilenet_1.00_128 (Functi  (None, 4, 4, 1024)        3228288   
 onal)                                                           
                                                                 
 global_average_pooling2d_1  (None, 1024)              0         
 0 (GlobalAveragePooling2D)                                      
                                                                 
 dropout_20 (Dropout)        (None, 1024)              0         
                                                                 
 dense_20 (Dense)            (None, 512)               524800    
                                                                 
 dropout_21 (Dropout)        (None, 512)               0         
                                                                 
 dense_21 (Dense)            (None, 4)                 2052      
                                                                 
=================================================================
Total params: 3755140 (14.32 MB)
Trainable params: 3733252 (14.24 MB)
Non-trainable params: 21888 (85.50 KB)
_________________________________________________________________
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/2074365489.py:37: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  node3_model.fit_generator(node3_gen,
Epoch 1/100
100/100 [==============================] - ETA: 0s - loss: 0.7804 - binary_accuracy: 0.6963 - mae: 0.3694
Epoch 1: val_loss improved from inf to 0.59659, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 55s 538ms/step - loss: 0.7804 - binary_accuracy: 0.6963 - mae: 0.3694 - val_loss: 0.5966 - val_binary_accuracy: 0.6646 - val_mae: 0.4017
Epoch 2/100
100/100 [==============================] - ETA: 0s - loss: 0.6484 - binary_accuracy: 0.7240 - mae: 0.3669
Epoch 2: val_loss did not improve from 0.59659
100/100 [==============================] - 50s 501ms/step - loss: 0.6484 - binary_accuracy: 0.7240 - mae: 0.3669 - val_loss: 0.6054 - val_binary_accuracy: 0.7500 - val_mae: 0.3413
Epoch 3/100
100/100 [==============================] - ETA: 0s - loss: 0.5747 - binary_accuracy: 0.7382 - mae: 0.3608
Epoch 3: val_loss did not improve from 0.59659
100/100 [==============================] - 48s 482ms/step - loss: 0.5747 - binary_accuracy: 0.7382 - mae: 0.3608 - val_loss: 0.6639 - val_binary_accuracy: 0.7500 - val_mae: 0.3048
Epoch 4/100
100/100 [==============================] - ETA: 0s - loss: 0.5622 - binary_accuracy: 0.7434 - mae: 0.3604
Epoch 4: val_loss did not improve from 0.59659
100/100 [==============================] - 48s 481ms/step - loss: 0.5622 - binary_accuracy: 0.7434 - mae: 0.3604 - val_loss: 0.7012 - val_binary_accuracy: 0.7500 - val_mae: 0.2923
Epoch 5/100
100/100 [==============================] - ETA: 0s - loss: 0.5492 - binary_accuracy: 0.7456 - mae: 0.3536
Epoch 5: val_loss did not improve from 0.59659
100/100 [==============================] - 50s 494ms/step - loss: 0.5492 - binary_accuracy: 0.7456 - mae: 0.3536 - val_loss: 0.7031 - val_binary_accuracy: 0.7500 - val_mae: 0.3012
Epoch 6/100
100/100 [==============================] - ETA: 0s - loss: 0.5446 - binary_accuracy: 0.7452 - mae: 0.3522
Epoch 6: val_loss did not improve from 0.59659
100/100 [==============================] - 49s 488ms/step - loss: 0.5446 - binary_accuracy: 0.7452 - mae: 0.3522 - val_loss: 0.6415 - val_binary_accuracy: 0.7500 - val_mae: 0.3111
Epoch 7/100
100/100 [==============================] - ETA: 0s - loss: 0.5405 - binary_accuracy: 0.7509 - mae: 0.3490
Epoch 7: val_loss improved from 0.59659 to 0.57882, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 48s 479ms/step - loss: 0.5405 - binary_accuracy: 0.7509 - mae: 0.3490 - val_loss: 0.5788 - val_binary_accuracy: 0.7500 - val_mae: 0.3274
Epoch 8/100
100/100 [==============================] - ETA: 0s - loss: 0.5401 - binary_accuracy: 0.7491 - mae: 0.3510
Epoch 8: val_loss improved from 0.57882 to 0.54629, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 50s 501ms/step - loss: 0.5401 - binary_accuracy: 0.7491 - mae: 0.3510 - val_loss: 0.5463 - val_binary_accuracy: 0.7451 - val_mae: 0.3315
Epoch 9/100
100/100 [==============================] - ETA: 0s - loss: 0.5349 - binary_accuracy: 0.7495 - mae: 0.3476
Epoch 9: val_loss improved from 0.54629 to 0.54338, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 50s 496ms/step - loss: 0.5349 - binary_accuracy: 0.7495 - mae: 0.3476 - val_loss: 0.5434 - val_binary_accuracy: 0.7451 - val_mae: 0.3318
Epoch 10/100
100/100 [==============================] - ETA: 0s - loss: 0.5336 - binary_accuracy: 0.7525 - mae: 0.3470
Epoch 10: val_loss improved from 0.54338 to 0.53805, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 48s 482ms/step - loss: 0.5336 - binary_accuracy: 0.7525 - mae: 0.3470 - val_loss: 0.5380 - val_binary_accuracy: 0.7478 - val_mae: 0.3402
Epoch 11/100
100/100 [==============================] - ETA: 0s - loss: 0.5306 - binary_accuracy: 0.7515 - mae: 0.3452
Epoch 11: val_loss did not improve from 0.53805
100/100 [==============================] - 49s 490ms/step - loss: 0.5306 - binary_accuracy: 0.7515 - mae: 0.3452 - val_loss: 0.5424 - val_binary_accuracy: 0.7463 - val_mae: 0.3335
Epoch 12/100
100/100 [==============================] - ETA: 0s - loss: 0.5247 - binary_accuracy: 0.7527 - mae: 0.3419
Epoch 12: val_loss improved from 0.53805 to 0.52518, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 50s 500ms/step - loss: 0.5247 - binary_accuracy: 0.7527 - mae: 0.3419 - val_loss: 0.5252 - val_binary_accuracy: 0.7549 - val_mae: 0.3373
Epoch 13/100
100/100 [==============================] - ETA: 0s - loss: 0.5296 - binary_accuracy: 0.7504 - mae: 0.3447
Epoch 13: val_loss did not improve from 0.52518
100/100 [==============================] - 49s 493ms/step - loss: 0.5296 - binary_accuracy: 0.7504 - mae: 0.3447 - val_loss: 0.5561 - val_binary_accuracy: 0.7466 - val_mae: 0.3450
Epoch 14/100
100/100 [==============================] - ETA: 0s - loss: 0.5272 - binary_accuracy: 0.7513 - mae: 0.3440
Epoch 14: val_loss did not improve from 0.52518
100/100 [==============================] - 48s 476ms/step - loss: 0.5272 - binary_accuracy: 0.7513 - mae: 0.3440 - val_loss: 0.5456 - val_binary_accuracy: 0.7498 - val_mae: 0.3458
Epoch 15/100
100/100 [==============================] - ETA: 0s - loss: 0.5213 - binary_accuracy: 0.7511 - mae: 0.3407
Epoch 15: val_loss did not improve from 0.52518
100/100 [==============================] - 48s 472ms/step - loss: 0.5213 - binary_accuracy: 0.7511 - mae: 0.3407 - val_loss: 0.5473 - val_binary_accuracy: 0.7480 - val_mae: 0.3305
Epoch 16/100
100/100 [==============================] - ETA: 0s - loss: 0.5279 - binary_accuracy: 0.7497 - mae: 0.3451
Epoch 16: val_loss improved from 0.52518 to 0.52364, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 49s 487ms/step - loss: 0.5279 - binary_accuracy: 0.7497 - mae: 0.3451 - val_loss: 0.5236 - val_binary_accuracy: 0.7522 - val_mae: 0.3458
Epoch 17/100
100/100 [==============================] - ETA: 0s - loss: 0.5149 - binary_accuracy: 0.7566 - mae: 0.3362
Epoch 17: val_loss did not improve from 0.52364
100/100 [==============================] - 47s 471ms/step - loss: 0.5149 - binary_accuracy: 0.7566 - mae: 0.3362 - val_loss: 0.5267 - val_binary_accuracy: 0.7510 - val_mae: 0.3353
Epoch 18/100
100/100 [==============================] - ETA: 0s - loss: 0.5193 - binary_accuracy: 0.7526 - mae: 0.3383
Epoch 18: val_loss improved from 0.52364 to 0.51653, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 47s 470ms/step - loss: 0.5193 - binary_accuracy: 0.7526 - mae: 0.3383 - val_loss: 0.5165 - val_binary_accuracy: 0.7600 - val_mae: 0.3407
Epoch 19/100
100/100 [==============================] - ETA: 0s - loss: 0.5153 - binary_accuracy: 0.7565 - mae: 0.3365
Epoch 19: val_loss did not improve from 0.51653
100/100 [==============================] - 48s 477ms/step - loss: 0.5153 - binary_accuracy: 0.7565 - mae: 0.3365 - val_loss: 0.5591 - val_binary_accuracy: 0.7556 - val_mae: 0.3462
Epoch 20/100
100/100 [==============================] - ETA: 0s - loss: 0.5180 - binary_accuracy: 0.7575 - mae: 0.3388
Epoch 20: val_loss did not improve from 0.51653
100/100 [==============================] - 47s 469ms/step - loss: 0.5180 - binary_accuracy: 0.7575 - mae: 0.3388 - val_loss: 0.5210 - val_binary_accuracy: 0.7561 - val_mae: 0.3168
Epoch 21/100
100/100 [==============================] - ETA: 0s - loss: 0.5121 - binary_accuracy: 0.7562 - mae: 0.3343
Epoch 21: val_loss improved from 0.51653 to 0.51562, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 47s 468ms/step - loss: 0.5121 - binary_accuracy: 0.7562 - mae: 0.3343 - val_loss: 0.5156 - val_binary_accuracy: 0.7598 - val_mae: 0.3354
Epoch 22/100
100/100 [==============================] - ETA: 0s - loss: 0.5132 - binary_accuracy: 0.7563 - mae: 0.3353
Epoch 22: val_loss did not improve from 0.51562
100/100 [==============================] - 48s 474ms/step - loss: 0.5132 - binary_accuracy: 0.7563 - mae: 0.3353 - val_loss: 0.6093 - val_binary_accuracy: 0.6946 - val_mae: 0.3590
Epoch 23/100
100/100 [==============================] - ETA: 0s - loss: 0.5071 - binary_accuracy: 0.7586 - mae: 0.3312
Epoch 23: val_loss did not improve from 0.51562
100/100 [==============================] - 51s 504ms/step - loss: 0.5071 - binary_accuracy: 0.7586 - mae: 0.3312 - val_loss: 0.5332 - val_binary_accuracy: 0.7478 - val_mae: 0.3339
Epoch 24/100
100/100 [==============================] - ETA: 0s - loss: 0.5127 - binary_accuracy: 0.7563 - mae: 0.3346
Epoch 24: val_loss improved from 0.51562 to 0.51302, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 47s 472ms/step - loss: 0.5127 - binary_accuracy: 0.7563 - mae: 0.3346 - val_loss: 0.5130 - val_binary_accuracy: 0.7620 - val_mae: 0.3253
Epoch 25/100
100/100 [==============================] - ETA: 0s - loss: 0.5124 - binary_accuracy: 0.7570 - mae: 0.3350
Epoch 25: val_loss did not improve from 0.51302
100/100 [==============================] - 48s 477ms/step - loss: 0.5124 - binary_accuracy: 0.7570 - mae: 0.3350 - val_loss: 0.5223 - val_binary_accuracy: 0.7581 - val_mae: 0.3387
Epoch 26/100
100/100 [==============================] - ETA: 0s - loss: 0.5100 - binary_accuracy: 0.7600 - mae: 0.3329
Epoch 26: val_loss did not improve from 0.51302
100/100 [==============================] - 47s 470ms/step - loss: 0.5100 - binary_accuracy: 0.7600 - mae: 0.3329 - val_loss: 0.5401 - val_binary_accuracy: 0.7488 - val_mae: 0.3368
Epoch 27/100
100/100 [==============================] - ETA: 0s - loss: 0.5104 - binary_accuracy: 0.7575 - mae: 0.3336
Epoch 27: val_loss improved from 0.51302 to 0.50762, saving model to xray_class_weights.best.hdf5
100/100 [==============================] - 49s 484ms/step - loss: 0.5104 - binary_accuracy: 0.7575 - mae: 0.3336 - val_loss: 0.5076 - val_binary_accuracy: 0.7544 - val_mae: 0.3321
Epoch 28/100
100/100 [==============================] - ETA: 0s - loss: 0.4989 - binary_accuracy: 0.7679 - mae: 0.3241
Epoch 28: val_loss did not improve from 0.50762
100/100 [==============================] - 48s 476ms/step - loss: 0.4989 - binary_accuracy: 0.7679 - mae: 0.3241 - val_loss: 0.5459 - val_binary_accuracy: 0.7466 - val_mae: 0.3327
Epoch 29/100
100/100 [==============================] - ETA: 0s - loss: 0.5056 - binary_accuracy: 0.7591 - mae: 0.3297
Epoch 29: val_loss did not improve from 0.50762
100/100 [==============================] - 46s 455ms/step - loss: 0.5056 - binary_accuracy: 0.7591 - mae: 0.3297 - val_loss: 0.5556 - val_binary_accuracy: 0.7429 - val_mae: 0.3293
Epoch 30/100
100/100 [==============================] - ETA: 0s - loss: 0.5079 - binary_accuracy: 0.7623 - mae: 0.3304
Epoch 30: val_loss did not improve from 0.50762
100/100 [==============================] - 47s 468ms/step - loss: 0.5079 - binary_accuracy: 0.7623 - mae: 0.3304 - val_loss: 0.5544 - val_binary_accuracy: 0.7385 - val_mae: 0.3364
Epoch 31/100
100/100 [==============================] - ETA: 0s - loss: 0.5045 - binary_accuracy: 0.7628 - mae: 0.3299
Epoch 31: val_loss did not improve from 0.50762
100/100 [==============================] - 47s 470ms/step - loss: 0.5045 - binary_accuracy: 0.7628 - mae: 0.3299 - val_loss: 0.5746 - val_binary_accuracy: 0.7405 - val_mae: 0.3175
Epoch 32/100
100/100 [==============================] - ETA: 0s - loss: 0.5017 - binary_accuracy: 0.7643 - mae: 0.3253
Epoch 32: val_loss did not improve from 0.50762
100/100 [==============================] - 46s 452ms/step - loss: 0.5017 - binary_accuracy: 0.7643 - mae: 0.3253 - val_loss: 0.5126 - val_binary_accuracy: 0.7605 - val_mae: 0.3154
Epoch 33/100
100/100 [==============================] - ETA: 0s - loss: 0.5008 - binary_accuracy: 0.7659 - mae: 0.3255
Epoch 33: val_loss did not improve from 0.50762
100/100 [==============================] - 46s 459ms/step - loss: 0.5008 - binary_accuracy: 0.7659 - mae: 0.3255 - val_loss: 0.5158 - val_binary_accuracy: 0.7510 - val_mae: 0.3258
Epoch 34/100
100/100 [==============================] - ETA: 0s - loss: 0.5028 - binary_accuracy: 0.7625 - mae: 0.3285
Epoch 34: val_loss did not improve from 0.50762
100/100 [==============================] - 45s 448ms/step - loss: 0.5028 - binary_accuracy: 0.7625 - mae: 0.3285 - val_loss: 0.6025 - val_binary_accuracy: 0.7283 - val_mae: 0.3425
Epoch 35/100
100/100 [==============================] - ETA: 0s - loss: 0.5073 - binary_accuracy: 0.7613 - mae: 0.3285
Epoch 35: val_loss did not improve from 0.50762
100/100 [==============================] - 45s 450ms/step - loss: 0.5073 - binary_accuracy: 0.7613 - mae: 0.3285 - val_loss: 0.5345 - val_binary_accuracy: 0.7512 - val_mae: 0.3381
Epoch 36/100
100/100 [==============================] - ETA: 0s - loss: 0.5003 - binary_accuracy: 0.7654 - mae: 0.3255
Epoch 36: val_loss did not improve from 0.50762
100/100 [==============================] - 44s 436ms/step - loss: 0.5003 - binary_accuracy: 0.7654 - mae: 0.3255 - val_loss: 0.5392 - val_binary_accuracy: 0.7476 - val_mae: 0.3333
Epoch 37/100
100/100 [==============================] - ETA: 0s - loss: 0.5026 - binary_accuracy: 0.7612 - mae: 0.3274
Epoch 37: val_loss did not improve from 0.50762
100/100 [==============================] - 45s 445ms/step - loss: 0.5026 - binary_accuracy: 0.7612 - mae: 0.3274 - val_loss: 0.5177 - val_binary_accuracy: 0.7502 - val_mae: 0.3323
32/32 [==============================] - 2s 66ms/step
No description has been provided for this image
No description has been provided for this image

Swarm¶

InĀ [87]:
import numpy as np
from keras.models import Model, clone_model, Sequential
from keras.preprocessing.image import ImageDataGenerator
from typing import List, Dict, Tuple
import tensorflow as tf
from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense, Dropout

class SwarmMerger:
    def __init__(self, 
                 input_shape,
                 num_classes,
                 img_size,
                 core_idg: ImageDataGenerator):
        """
        Initialize the swarm merger for weighted parameter averaging
        
        Args:
            input_shape: Input shape for the model (excluding batch size)
            num_classes: Number of classes for classification
            img_size: Tuple of (height, width) for images
            core_idg: Core ImageDataGenerator for data augmentation
        """
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.img_size = img_size
        self.core_idg = core_idg
        self.base_model = self._create_base_model()
        
    def _create_base_model(self) -> Model:
        """Create the base MobileNet model architecture"""
        base_mobilenet_model = MobileNet(
            input_shape=self.input_shape,
            include_top=False, 
            weights=None
        )
        
        model = Sequential([
            base_mobilenet_model,
            GlobalAveragePooling2D(),
            Dropout(0.5),
            Dense(512),
            Dropout(0.5),
            Dense(self.num_classes, activation='sigmoid')
        ])
        
        model.compile(
            optimizer='adam',
            loss='binary_crossentropy',
            metrics=['binary_accuracy', 'mae']
        )
        
        return model
    
    def normalize_weights(self, weights: List[float]) -> np.ndarray:
        """Normalize weights to sum to 1"""
        weights = np.array(weights)
        return weights / np.sum(weights)
    
    def merge_parameters(self, 
                        node_models: List[Model], 
                        sample_counts: List[int] = None) -> Model:
        """
        Merge parameters from multiple models using weighted averaging
        
        Args:
            node_models: List of trained Keras models with identical architecture
            sample_counts: Number of samples each node was trained on (for weighting)
            
        Returns:
            Merged model with averaged parameters
        """
        if sample_counts is None:
            weights = [1.0] * len(node_models)
        else:
            weights = self.normalize_weights(sample_counts)
            
        merged_model = clone_model(self.base_model)
        merged_model.compile(
            optimizer='adam',
            loss='binary_crossentropy',
            metrics=['binary_accuracy', 'mae']
        )
        
        # Get the weights from each model
        all_weights = [model.get_weights() for model in node_models]
        
        # Perform weighted averaging of parameters
        merged_weights = []
        for layer_weights in zip(*all_weights):
            avg_layer_weights = sum(w * layer_weight for w, layer_weight 
                                  in zip(weights, layer_weights))
            merged_weights.append(avg_layer_weights)
            
        merged_model.set_weights(merged_weights)
        return merged_model
    
    def iterative_merge(self, 
                       node_models: List[Model],
                       node_generators: List[tf.keras.preprocessing.image.DirectoryIterator],
                       valid_generator: tf.keras.preprocessing.image.DirectoryIterator,
                       sample_counts: List[int],
                       merge_frequency: int = 5,
                       total_iterations: int = 20,
                       steps_per_epoch: int = 100) -> Model:
        """
        Perform iterative parameter merging with continued training
        
        Args:
            node_models: List of node models to merge
            node_generators: List of data generators for each node
            valid_generator: Validation data generator
            sample_counts: Training samples per node
            merge_frequency: How often to perform merging (in epochs)
            total_iterations: Total number of merge iterations
            steps_per_epoch: Number of steps per training epoch
            
        Returns:
            Final merged model
        """
        current_models = node_models
        best_val_loss = float('inf')
        best_model = None
        
        for iteration in range(total_iterations):
            print(f"\nIteration {iteration + 1}/{total_iterations}")
            
            # Merge current models
            merged_model = self.merge_parameters(current_models, sample_counts)
            
            # Evaluate merged model
            val_loss = merged_model.evaluate_generator(
                valid_generator,
                steps=len(valid_generator),
                verbose=1
            )[0]
            
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_model = clone_model(merged_model)
                best_model.set_weights(merged_model.get_weights())
                print(f"New best validation loss: {best_val_loss}")
            
            # Update all nodes with merged parameters
            for model in current_models:
                model.set_weights(merged_model.get_weights())
                
            # Continue training individual nodes
            if iteration < total_iterations - 1:
                for i, (model, generator) in enumerate(zip(current_models, node_generators)):
                    print(f"\nTraining Node {i + 1}")
                    model.fit_generator(
                        generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=merge_frequency,
                        validation_data=valid_generator,
                        validation_steps=len(valid_generator),
                        verbose=1
                    )
                    
        return best_model

def evaluate_model(model, test_X, test_Y, all_labels):
    """
    Evaluate the model and plot ROC curves
    
    Args:
        model: Trained model to evaluate
        test_X: Test input data
        test_Y: Test labels
        all_labels: List of class names
    """
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc
    
    pred_Y = model.predict(test_X, batch_size=32, verbose=True)
    
    # Plot ROC curves
    fig, c_ax = plt.subplots(1, 1, figsize=(9, 9))
    for (idx, c_label) in enumerate(all_labels):
        fpr, tpr, thresholds = roc_curve(test_Y[:, idx].astype(int), pred_Y[:, idx])
        c_ax.plot(fpr, tpr, label=f'{c_label} (AUC:{auc(fpr, tpr):.2f})')
    
    c_ax.legend()
    c_ax.set_xlabel('False Positive Rate')
    c_ax.set_ylabel('True Positive Rate')
    plt.title('ROC Curves for Merged Model')
    plt.show()
    
    return pred_Y
InĀ [88]:
# Initialize the swarm merger
swarm = SwarmMerger(
    input_shape=t_x.shape[1:],  # Your image shape
    num_classes=len(all_labels),
    img_size=IMG_SIZE,
    core_idg=core_idg
)

# List of models and their corresponding generators
node_models = [node1_model, node2_model, node3_model]
node_generators = [node1_gen, node2_gen, node3_gen]

# Get sample counts from the dataframes
sample_counts = [len(node1_df), len(node2_df), len(node3_df)]

# Perform iterative merging
merged_model = swarm.iterative_merge(
    node_models=node_models,
    node_generators=node_generators,
    valid_generator=valid_gen,
    sample_counts=sample_counts,
    merge_frequency=1,
    total_iterations=100,
    steps_per_epoch=100
)

# Evaluate the merged model
pred_Y = evaluate_model(merged_model, test_X, test_Y, all_labels)
Iteration 1/100
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/2789746691.py:132: UserWarning: `Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators.
  val_loss = merged_model.evaluate_generator(
14/14 [==============================] - 34s 2s/step - loss: 0.5844 - binary_accuracy: 0.7500 - mae: 0.4253
New best validation loss: 0.5844358205795288

Training Node 1
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/2789746691.py:152: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  model.fit_generator(
100/100 [==============================] - 74s 743ms/step - loss: 0.5424 - binary_accuracy: 0.7469 - mae: 0.3589 - val_loss: 0.5610 - val_binary_accuracy: 0.7500 - val_mae: 0.3530

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.5495 - binary_accuracy: 0.7480 - mae: 0.3740WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 58s 576ms/step - loss: 0.5495 - binary_accuracy: 0.7480 - mae: 0.3740 - val_loss: 0.5575 - val_binary_accuracy: 0.7500 - val_mae: 0.3765

Training Node 3
100/100 [==============================] - 73s 734ms/step - loss: 0.5413 - binary_accuracy: 0.7500 - mae: 0.3554 - val_loss: 0.5647 - val_binary_accuracy: 0.7500 - val_mae: 0.3463

Iteration 2/100
14/14 [==============================] - 33s 2s/step - loss: 0.5602 - binary_accuracy: 0.7500 - mae: 0.3551
New best validation loss: 0.5602215528488159

Training Node 1
100/100 [==============================] - 69s 690ms/step - loss: 0.5305 - binary_accuracy: 0.7538 - mae: 0.3477 - val_loss: 0.5681 - val_binary_accuracy: 0.7500 - val_mae: 0.3456

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.5166 - binary_accuracy: 0.7539 - mae: 0.3393WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 602ms/step - loss: 0.5166 - binary_accuracy: 0.7539 - mae: 0.3393 - val_loss: 0.5658 - val_binary_accuracy: 0.7500 - val_mae: 0.3481

Training Node 3
100/100 [==============================] - 74s 743ms/step - loss: 0.5216 - binary_accuracy: 0.7548 - mae: 0.3414 - val_loss: 0.5671 - val_binary_accuracy: 0.7500 - val_mae: 0.3433

Iteration 3/100
14/14 [==============================] - 33s 2s/step - loss: 0.5658 - binary_accuracy: 0.7500 - mae: 0.3460

Training Node 1
100/100 [==============================] - 70s 704ms/step - loss: 0.5225 - binary_accuracy: 0.7523 - mae: 0.3441 - val_loss: 0.5687 - val_binary_accuracy: 0.7500 - val_mae: 0.3422

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.5128 - binary_accuracy: 0.7561 - mae: 0.3394WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 627ms/step - loss: 0.5128 - binary_accuracy: 0.7561 - mae: 0.3394 - val_loss: 0.5715 - val_binary_accuracy: 0.7500 - val_mae: 0.3397

Training Node 3
100/100 [==============================] - 73s 736ms/step - loss: 0.5207 - binary_accuracy: 0.7561 - mae: 0.3415 - val_loss: 0.5869 - val_binary_accuracy: 0.7500 - val_mae: 0.3369

Iteration 4/100
14/14 [==============================] - 33s 2s/step - loss: 0.5700 - binary_accuracy: 0.7500 - mae: 0.3367

Training Node 1
100/100 [==============================] - 68s 680ms/step - loss: 0.5202 - binary_accuracy: 0.7541 - mae: 0.3410 - val_loss: 0.5447 - val_binary_accuracy: 0.7500 - val_mae: 0.3449

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.5068 - binary_accuracy: 0.7600 - mae: 0.3336WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 57s 574ms/step - loss: 0.5068 - binary_accuracy: 0.7600 - mae: 0.3336 - val_loss: 0.5587 - val_binary_accuracy: 0.7500 - val_mae: 0.3426

Training Node 3
100/100 [==============================] - 77s 772ms/step - loss: 0.5142 - binary_accuracy: 0.7548 - mae: 0.3365 - val_loss: 0.5497 - val_binary_accuracy: 0.7500 - val_mae: 0.3441

Iteration 5/100
14/14 [==============================] - 33s 2s/step - loss: 0.5569 - binary_accuracy: 0.7500 - mae: 0.3491
New best validation loss: 0.5568915605545044

Training Node 1
100/100 [==============================] - 68s 677ms/step - loss: 0.5162 - binary_accuracy: 0.7538 - mae: 0.3385 - val_loss: 0.5313 - val_binary_accuracy: 0.7526 - val_mae: 0.3517

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.5054 - binary_accuracy: 0.7571 - mae: 0.3350WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 554ms/step - loss: 0.5054 - binary_accuracy: 0.7571 - mae: 0.3350 - val_loss: 0.5300 - val_binary_accuracy: 0.7518 - val_mae: 0.3421

Training Node 3
100/100 [==============================] - 77s 768ms/step - loss: 0.5138 - binary_accuracy: 0.7565 - mae: 0.3374 - val_loss: 0.5550 - val_binary_accuracy: 0.7277 - val_mae: 0.3471

Iteration 6/100
14/14 [==============================] - 33s 2s/step - loss: 0.5219 - binary_accuracy: 0.7528 - mae: 0.3440
New best validation loss: 0.5218707919120789

Training Node 1
100/100 [==============================] - 68s 678ms/step - loss: 0.5086 - binary_accuracy: 0.7602 - mae: 0.3326 - val_loss: 0.5239 - val_binary_accuracy: 0.7561 - val_mae: 0.3222

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.5018 - binary_accuracy: 0.7613 - mae: 0.3303WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 550ms/step - loss: 0.5018 - binary_accuracy: 0.7613 - mae: 0.3303 - val_loss: 0.5001 - val_binary_accuracy: 0.7597 - val_mae: 0.3285

Training Node 3
100/100 [==============================] - 75s 753ms/step - loss: 0.5103 - binary_accuracy: 0.7606 - mae: 0.3327 - val_loss: 0.5291 - val_binary_accuracy: 0.7504 - val_mae: 0.3316

Iteration 7/100
14/14 [==============================] - 33s 2s/step - loss: 0.5139 - binary_accuracy: 0.7578 - mae: 0.3272
New best validation loss: 0.513934850692749

Training Node 1
100/100 [==============================] - 71s 707ms/step - loss: 0.5110 - binary_accuracy: 0.7538 - mae: 0.3355 - val_loss: 0.5310 - val_binary_accuracy: 0.7560 - val_mae: 0.3330

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4981 - binary_accuracy: 0.7639 - mae: 0.3267WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 549ms/step - loss: 0.4981 - binary_accuracy: 0.7639 - mae: 0.3267 - val_loss: 0.5015 - val_binary_accuracy: 0.7630 - val_mae: 0.3252

Training Node 3
100/100 [==============================] - 71s 714ms/step - loss: 0.5046 - binary_accuracy: 0.7620 - mae: 0.3301 - val_loss: 0.5470 - val_binary_accuracy: 0.7432 - val_mae: 0.3262

Iteration 8/100
14/14 [==============================] - 33s 2s/step - loss: 0.5080 - binary_accuracy: 0.7557 - mae: 0.3222
New best validation loss: 0.5079642534255981

Training Node 1
100/100 [==============================] - 68s 678ms/step - loss: 0.5015 - binary_accuracy: 0.7632 - mae: 0.3281 - val_loss: 0.5756 - val_binary_accuracy: 0.7369 - val_mae: 0.3518

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4991 - binary_accuracy: 0.7648 - mae: 0.3283WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 565ms/step - loss: 0.4991 - binary_accuracy: 0.7648 - mae: 0.3283 - val_loss: 0.4950 - val_binary_accuracy: 0.7667 - val_mae: 0.3195

Training Node 3
100/100 [==============================] - 75s 747ms/step - loss: 0.5033 - binary_accuracy: 0.7605 - mae: 0.3294 - val_loss: 0.5479 - val_binary_accuracy: 0.7535 - val_mae: 0.3350

Iteration 9/100
14/14 [==============================] - 34s 2s/step - loss: 0.5108 - binary_accuracy: 0.7610 - mae: 0.3310

Training Node 1
100/100 [==============================] - 69s 691ms/step - loss: 0.5014 - binary_accuracy: 0.7643 - mae: 0.3275 - val_loss: 0.5116 - val_binary_accuracy: 0.7599 - val_mae: 0.3387

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.5041 - binary_accuracy: 0.7573 - mae: 0.3316WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 58s 579ms/step - loss: 0.5041 - binary_accuracy: 0.7573 - mae: 0.3316 - val_loss: 0.4929 - val_binary_accuracy: 0.7648 - val_mae: 0.3154

Training Node 3
100/100 [==============================] - 82s 818ms/step - loss: 0.5037 - binary_accuracy: 0.7625 - mae: 0.3294 - val_loss: 0.5316 - val_binary_accuracy: 0.7506 - val_mae: 0.3330

Iteration 10/100
14/14 [==============================] - 33s 2s/step - loss: 0.4981 - binary_accuracy: 0.7615 - mae: 0.3255
New best validation loss: 0.49813535809516907

Training Node 1
100/100 [==============================] - 73s 736ms/step - loss: 0.5018 - binary_accuracy: 0.7634 - mae: 0.3282 - val_loss: 0.5383 - val_binary_accuracy: 0.7501 - val_mae: 0.3430

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4954 - binary_accuracy: 0.7648 - mae: 0.3241WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 598ms/step - loss: 0.4954 - binary_accuracy: 0.7648 - mae: 0.3241 - val_loss: 0.4937 - val_binary_accuracy: 0.7664 - val_mae: 0.3215

Training Node 3
100/100 [==============================] - 79s 797ms/step - loss: 0.4982 - binary_accuracy: 0.7652 - mae: 0.3255 - val_loss: 0.5467 - val_binary_accuracy: 0.7474 - val_mae: 0.3077

Iteration 11/100
14/14 [==============================] - 34s 2s/step - loss: 0.4992 - binary_accuracy: 0.7632 - mae: 0.3116

Training Node 1
100/100 [==============================] - 78s 782ms/step - loss: 0.5001 - binary_accuracy: 0.7631 - mae: 0.3265 - val_loss: 0.5238 - val_binary_accuracy: 0.7463 - val_mae: 0.3218

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4950 - binary_accuracy: 0.7683 - mae: 0.3243WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 632ms/step - loss: 0.4950 - binary_accuracy: 0.7683 - mae: 0.3243 - val_loss: 0.4943 - val_binary_accuracy: 0.7598 - val_mae: 0.3129

Training Node 3
100/100 [==============================] - 81s 807ms/step - loss: 0.5021 - binary_accuracy: 0.7624 - mae: 0.3290 - val_loss: 0.6275 - val_binary_accuracy: 0.6877 - val_mae: 0.3630

Iteration 12/100
14/14 [==============================] - 34s 2s/step - loss: 0.5026 - binary_accuracy: 0.7518 - mae: 0.3237

Training Node 1
100/100 [==============================] - 82s 823ms/step - loss: 0.5019 - binary_accuracy: 0.7619 - mae: 0.3282 - val_loss: 0.5495 - val_binary_accuracy: 0.7427 - val_mae: 0.3471

Training Node 2
 62/100 [=================>............] - ETA: 19s - loss: 0.4970 - binary_accuracy: 0.7621 - mae: 0.3248WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 71s 709ms/step - loss: 0.4970 - binary_accuracy: 0.7621 - mae: 0.3248 - val_loss: 0.4957 - val_binary_accuracy: 0.7610 - val_mae: 0.3176

Training Node 3
100/100 [==============================] - 81s 810ms/step - loss: 0.5004 - binary_accuracy: 0.7641 - mae: 0.3255 - val_loss: 0.5332 - val_binary_accuracy: 0.7451 - val_mae: 0.3347

Iteration 13/100
14/14 [==============================] - 33s 2s/step - loss: 0.5074 - binary_accuracy: 0.7539 - mae: 0.3232

Training Node 1
100/100 [==============================] - 77s 771ms/step - loss: 0.4945 - binary_accuracy: 0.7671 - mae: 0.3225 - val_loss: 0.5321 - val_binary_accuracy: 0.7396 - val_mae: 0.3385

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4968 - binary_accuracy: 0.7659 - mae: 0.3272WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 611ms/step - loss: 0.4968 - binary_accuracy: 0.7659 - mae: 0.3272 - val_loss: 0.5029 - val_binary_accuracy: 0.7596 - val_mae: 0.3161

Training Node 3
100/100 [==============================] - 78s 787ms/step - loss: 0.5038 - binary_accuracy: 0.7593 - mae: 0.3300 - val_loss: 0.5353 - val_binary_accuracy: 0.7477 - val_mae: 0.3186

Iteration 14/100
14/14 [==============================] - 34s 2s/step - loss: 0.4962 - binary_accuracy: 0.7657 - mae: 0.3147
New best validation loss: 0.49624940752983093

Training Node 1
100/100 [==============================] - 77s 775ms/step - loss: 0.4975 - binary_accuracy: 0.7652 - mae: 0.3256 - val_loss: 0.5087 - val_binary_accuracy: 0.7526 - val_mae: 0.3335

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4918 - binary_accuracy: 0.7719 - mae: 0.3210WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 613ms/step - loss: 0.4918 - binary_accuracy: 0.7719 - mae: 0.3210 - val_loss: 0.4870 - val_binary_accuracy: 0.7688 - val_mae: 0.3172

Training Node 3
100/100 [==============================] - 78s 784ms/step - loss: 0.4925 - binary_accuracy: 0.7696 - mae: 0.3209 - val_loss: 0.5641 - val_binary_accuracy: 0.7424 - val_mae: 0.3264

Iteration 15/100
14/14 [==============================] - 34s 2s/step - loss: 0.5007 - binary_accuracy: 0.7612 - mae: 0.3145

Training Node 1
100/100 [==============================] - 76s 763ms/step - loss: 0.4944 - binary_accuracy: 0.7658 - mae: 0.3241 - val_loss: 0.5083 - val_binary_accuracy: 0.7590 - val_mae: 0.3182

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4865 - binary_accuracy: 0.7733 - mae: 0.3170WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 630ms/step - loss: 0.4865 - binary_accuracy: 0.7733 - mae: 0.3170 - val_loss: 0.4850 - val_binary_accuracy: 0.7706 - val_mae: 0.3149

Training Node 3
100/100 [==============================] - 85s 856ms/step - loss: 0.4961 - binary_accuracy: 0.7640 - mae: 0.3237 - val_loss: 0.5047 - val_binary_accuracy: 0.7580 - val_mae: 0.3203

Iteration 16/100
14/14 [==============================] - 35s 2s/step - loss: 0.4911 - binary_accuracy: 0.7668 - mae: 0.3123
New best validation loss: 0.4911210536956787

Training Node 1
100/100 [==============================] - 85s 849ms/step - loss: 0.4945 - binary_accuracy: 0.7677 - mae: 0.3236 - val_loss: 0.5335 - val_binary_accuracy: 0.7616 - val_mae: 0.3258

Training Node 2
 62/100 [=================>............] - ETA: 17s - loss: 0.4917 - binary_accuracy: 0.7716 - mae: 0.3219WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 65s 650ms/step - loss: 0.4917 - binary_accuracy: 0.7716 - mae: 0.3219 - val_loss: 0.4877 - val_binary_accuracy: 0.7706 - val_mae: 0.3106

Training Node 3
100/100 [==============================] - 82s 820ms/step - loss: 0.4921 - binary_accuracy: 0.7708 - mae: 0.3198 - val_loss: 0.5152 - val_binary_accuracy: 0.7609 - val_mae: 0.3281

Iteration 17/100
14/14 [==============================] - 35s 2s/step - loss: 0.4960 - binary_accuracy: 0.7640 - mae: 0.3180

Training Node 1
100/100 [==============================] - 83s 826ms/step - loss: 0.4954 - binary_accuracy: 0.7658 - mae: 0.3223 - val_loss: 0.5371 - val_binary_accuracy: 0.7481 - val_mae: 0.3387

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4911 - binary_accuracy: 0.7697 - mae: 0.3214WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 65s 654ms/step - loss: 0.4911 - binary_accuracy: 0.7697 - mae: 0.3214 - val_loss: 0.4840 - val_binary_accuracy: 0.7716 - val_mae: 0.3131

Training Node 3
100/100 [==============================] - 83s 832ms/step - loss: 0.4903 - binary_accuracy: 0.7681 - mae: 0.3199 - val_loss: 0.5151 - val_binary_accuracy: 0.7542 - val_mae: 0.3150

Iteration 18/100
14/14 [==============================] - 33s 2s/step - loss: 0.4920 - binary_accuracy: 0.7679 - mae: 0.3102

Training Node 1
100/100 [==============================] - 73s 736ms/step - loss: 0.4945 - binary_accuracy: 0.7658 - mae: 0.3231 - val_loss: 0.5234 - val_binary_accuracy: 0.7565 - val_mae: 0.3225

Training Node 2
 62/100 [=================>............] - ETA: 17s - loss: 0.4857 - binary_accuracy: 0.7769 - mae: 0.3168WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 65s 646ms/step - loss: 0.4857 - binary_accuracy: 0.7769 - mae: 0.3168 - val_loss: 0.4831 - val_binary_accuracy: 0.7703 - val_mae: 0.3044

Training Node 3
100/100 [==============================] - 80s 805ms/step - loss: 0.4922 - binary_accuracy: 0.7695 - mae: 0.3193 - val_loss: 0.5134 - val_binary_accuracy: 0.7585 - val_mae: 0.3120

Iteration 19/100
14/14 [==============================] - 33s 2s/step - loss: 0.4885 - binary_accuracy: 0.7667 - mae: 0.3064
New best validation loss: 0.4884701669216156

Training Node 1
100/100 [==============================] - 76s 765ms/step - loss: 0.4945 - binary_accuracy: 0.7626 - mae: 0.3225 - val_loss: 0.5094 - val_binary_accuracy: 0.7489 - val_mae: 0.3222

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4858 - binary_accuracy: 0.7666 - mae: 0.3168WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 614ms/step - loss: 0.4858 - binary_accuracy: 0.7666 - mae: 0.3168 - val_loss: 0.4837 - val_binary_accuracy: 0.7695 - val_mae: 0.3163

Training Node 3
100/100 [==============================] - 77s 769ms/step - loss: 0.4917 - binary_accuracy: 0.7652 - mae: 0.3209 - val_loss: 0.4907 - val_binary_accuracy: 0.7699 - val_mae: 0.3176

Iteration 20/100
14/14 [==============================] - 33s 2s/step - loss: 0.4804 - binary_accuracy: 0.7724 - mae: 0.3145
New best validation loss: 0.4804385304450989

Training Node 1
100/100 [==============================] - 76s 761ms/step - loss: 0.4889 - binary_accuracy: 0.7713 - mae: 0.3186 - val_loss: 0.5191 - val_binary_accuracy: 0.7550 - val_mae: 0.3442

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4872 - binary_accuracy: 0.7703 - mae: 0.3184WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 606ms/step - loss: 0.4872 - binary_accuracy: 0.7703 - mae: 0.3184 - val_loss: 0.4781 - val_binary_accuracy: 0.7714 - val_mae: 0.3110

Training Node 3
100/100 [==============================] - 78s 785ms/step - loss: 0.4866 - binary_accuracy: 0.7710 - mae: 0.3170 - val_loss: 0.8691 - val_binary_accuracy: 0.6937 - val_mae: 0.3213

Iteration 21/100
14/14 [==============================] - 33s 2s/step - loss: 0.4861 - binary_accuracy: 0.7736 - mae: 0.3068

Training Node 1
100/100 [==============================] - 73s 729ms/step - loss: 0.4911 - binary_accuracy: 0.7686 - mae: 0.3212 - val_loss: 0.5081 - val_binary_accuracy: 0.7573 - val_mae: 0.3200

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4852 - binary_accuracy: 0.7702 - mae: 0.3166WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 64s 637ms/step - loss: 0.4852 - binary_accuracy: 0.7702 - mae: 0.3166 - val_loss: 0.4843 - val_binary_accuracy: 0.7691 - val_mae: 0.3097

Training Node 3
100/100 [==============================] - 80s 804ms/step - loss: 0.4922 - binary_accuracy: 0.7689 - mae: 0.3204 - val_loss: 0.5075 - val_binary_accuracy: 0.7544 - val_mae: 0.3264

Iteration 22/100
14/14 [==============================] - 35s 2s/step - loss: 0.4849 - binary_accuracy: 0.7630 - mae: 0.3154

Training Node 1
100/100 [==============================] - 77s 777ms/step - loss: 0.4901 - binary_accuracy: 0.7689 - mae: 0.3191 - val_loss: 0.5108 - val_binary_accuracy: 0.7592 - val_mae: 0.3300

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4858 - binary_accuracy: 0.7682 - mae: 0.3165WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 596ms/step - loss: 0.4858 - binary_accuracy: 0.7682 - mae: 0.3165 - val_loss: 0.4857 - val_binary_accuracy: 0.7678 - val_mae: 0.3145

Training Node 3
100/100 [==============================] - 77s 776ms/step - loss: 0.4877 - binary_accuracy: 0.7717 - mae: 0.3169 - val_loss: 0.5082 - val_binary_accuracy: 0.7572 - val_mae: 0.3168

Iteration 23/100
14/14 [==============================] - 33s 2s/step - loss: 0.4861 - binary_accuracy: 0.7674 - mae: 0.3144

Training Node 1
100/100 [==============================] - 78s 785ms/step - loss: 0.4897 - binary_accuracy: 0.7704 - mae: 0.3190 - val_loss: 0.5404 - val_binary_accuracy: 0.7488 - val_mae: 0.3081

Training Node 2
 62/100 [=================>............] - ETA: 17s - loss: 0.4847 - binary_accuracy: 0.7698 - mae: 0.3151WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 64s 638ms/step - loss: 0.4847 - binary_accuracy: 0.7698 - mae: 0.3151 - val_loss: 0.4812 - val_binary_accuracy: 0.7711 - val_mae: 0.3160

Training Node 3
100/100 [==============================] - 78s 782ms/step - loss: 0.4888 - binary_accuracy: 0.7685 - mae: 0.3182 - val_loss: 0.5220 - val_binary_accuracy: 0.7525 - val_mae: 0.3097

Iteration 24/100
14/14 [==============================] - 33s 2s/step - loss: 0.4969 - binary_accuracy: 0.7667 - mae: 0.3035

Training Node 1
100/100 [==============================] - 75s 756ms/step - loss: 0.4889 - binary_accuracy: 0.7663 - mae: 0.3175 - val_loss: 0.6151 - val_binary_accuracy: 0.7211 - val_mae: 0.3314

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4892 - binary_accuracy: 0.7695 - mae: 0.3190WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 620ms/step - loss: 0.4892 - binary_accuracy: 0.7695 - mae: 0.3190 - val_loss: 0.4740 - val_binary_accuracy: 0.7735 - val_mae: 0.3114

Training Node 3
100/100 [==============================] - 76s 767ms/step - loss: 0.4900 - binary_accuracy: 0.7700 - mae: 0.3193 - val_loss: 0.5155 - val_binary_accuracy: 0.7591 - val_mae: 0.3040

Iteration 25/100
14/14 [==============================] - 33s 2s/step - loss: 0.4873 - binary_accuracy: 0.7688 - mae: 0.3045

Training Node 1
100/100 [==============================] - 76s 765ms/step - loss: 0.4862 - binary_accuracy: 0.7680 - mae: 0.3168 - val_loss: 0.4925 - val_binary_accuracy: 0.7698 - val_mae: 0.3102

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4828 - binary_accuracy: 0.7746 - mae: 0.3139WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 610ms/step - loss: 0.4828 - binary_accuracy: 0.7746 - mae: 0.3139 - val_loss: 0.4902 - val_binary_accuracy: 0.7632 - val_mae: 0.3112

Training Node 3
100/100 [==============================] - 78s 780ms/step - loss: 0.4869 - binary_accuracy: 0.7709 - mae: 0.3166 - val_loss: 0.5034 - val_binary_accuracy: 0.7595 - val_mae: 0.3158

Iteration 26/100
14/14 [==============================] - 33s 2s/step - loss: 0.4817 - binary_accuracy: 0.7727 - mae: 0.3093

Training Node 1
100/100 [==============================] - 76s 760ms/step - loss: 0.4838 - binary_accuracy: 0.7727 - mae: 0.3153 - val_loss: 0.5531 - val_binary_accuracy: 0.7396 - val_mae: 0.3351

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4865 - binary_accuracy: 0.7714 - mae: 0.3157WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 614ms/step - loss: 0.4865 - binary_accuracy: 0.7714 - mae: 0.3157 - val_loss: 0.4771 - val_binary_accuracy: 0.7760 - val_mae: 0.3088

Training Node 3
100/100 [==============================] - 77s 770ms/step - loss: 0.4884 - binary_accuracy: 0.7685 - mae: 0.3175 - val_loss: 0.4890 - val_binary_accuracy: 0.7655 - val_mae: 0.3142

Iteration 27/100
14/14 [==============================] - 33s 2s/step - loss: 0.4755 - binary_accuracy: 0.7746 - mae: 0.3119
New best validation loss: 0.4755436182022095

Training Node 1
100/100 [==============================] - 76s 758ms/step - loss: 0.4835 - binary_accuracy: 0.7714 - mae: 0.3143 - val_loss: 0.4879 - val_binary_accuracy: 0.7688 - val_mae: 0.3206

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4842 - binary_accuracy: 0.7723 - mae: 0.3156WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 609ms/step - loss: 0.4842 - binary_accuracy: 0.7723 - mae: 0.3156 - val_loss: 0.4779 - val_binary_accuracy: 0.7739 - val_mae: 0.3117

Training Node 3
100/100 [==============================] - 79s 790ms/step - loss: 0.4826 - binary_accuracy: 0.7739 - mae: 0.3131 - val_loss: 0.5301 - val_binary_accuracy: 0.7475 - val_mae: 0.3156

Iteration 28/100
14/14 [==============================] - 33s 2s/step - loss: 0.4869 - binary_accuracy: 0.7691 - mae: 0.3098

Training Node 1
100/100 [==============================] - 80s 800ms/step - loss: 0.4846 - binary_accuracy: 0.7726 - mae: 0.3153 - val_loss: 0.5020 - val_binary_accuracy: 0.7629 - val_mae: 0.3206

Training Node 2
 62/100 [=================>............] - ETA: 19s - loss: 0.4844 - binary_accuracy: 0.7687 - mae: 0.3161WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 67s 669ms/step - loss: 0.4844 - binary_accuracy: 0.7687 - mae: 0.3161 - val_loss: 0.4901 - val_binary_accuracy: 0.7672 - val_mae: 0.3116

Training Node 3
100/100 [==============================] - 79s 788ms/step - loss: 0.4846 - binary_accuracy: 0.7724 - mae: 0.3158 - val_loss: 0.5163 - val_binary_accuracy: 0.7590 - val_mae: 0.2967

Iteration 29/100
14/14 [==============================] - 33s 2s/step - loss: 0.4779 - binary_accuracy: 0.7727 - mae: 0.3022

Training Node 1
100/100 [==============================] - 76s 762ms/step - loss: 0.4795 - binary_accuracy: 0.7702 - mae: 0.3118 - val_loss: 0.5495 - val_binary_accuracy: 0.7408 - val_mae: 0.3410

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4770 - binary_accuracy: 0.7755 - mae: 0.3108WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 617ms/step - loss: 0.4770 - binary_accuracy: 0.7755 - mae: 0.3108 - val_loss: 0.4820 - val_binary_accuracy: 0.7742 - val_mae: 0.3069

Training Node 3
100/100 [==============================] - 79s 796ms/step - loss: 0.4828 - binary_accuracy: 0.7727 - mae: 0.3140 - val_loss: 0.5250 - val_binary_accuracy: 0.7560 - val_mae: 0.3187

Iteration 30/100
14/14 [==============================] - 33s 2s/step - loss: 0.4869 - binary_accuracy: 0.7674 - mae: 0.3094

Training Node 1
100/100 [==============================] - 76s 766ms/step - loss: 0.4837 - binary_accuracy: 0.7737 - mae: 0.3140 - val_loss: 0.5093 - val_binary_accuracy: 0.7529 - val_mae: 0.3199

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4822 - binary_accuracy: 0.7707 - mae: 0.3136WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 611ms/step - loss: 0.4822 - binary_accuracy: 0.7707 - mae: 0.3136 - val_loss: 0.4790 - val_binary_accuracy: 0.7734 - val_mae: 0.3117

Training Node 3
100/100 [==============================] - 78s 784ms/step - loss: 0.4815 - binary_accuracy: 0.7727 - mae: 0.3138 - val_loss: 0.5097 - val_binary_accuracy: 0.7659 - val_mae: 0.2955

Iteration 31/100
14/14 [==============================] - 33s 2s/step - loss: 0.4801 - binary_accuracy: 0.7727 - mae: 0.2979

Training Node 1
100/100 [==============================] - 76s 761ms/step - loss: 0.4803 - binary_accuracy: 0.7730 - mae: 0.3139 - val_loss: 0.5000 - val_binary_accuracy: 0.7650 - val_mae: 0.3201

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4815 - binary_accuracy: 0.7732 - mae: 0.3130WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 628ms/step - loss: 0.4815 - binary_accuracy: 0.7732 - mae: 0.3130 - val_loss: 0.4797 - val_binary_accuracy: 0.7751 - val_mae: 0.3114

Training Node 3
100/100 [==============================] - 79s 789ms/step - loss: 0.4809 - binary_accuracy: 0.7784 - mae: 0.3118 - val_loss: 0.5178 - val_binary_accuracy: 0.7615 - val_mae: 0.3150

Iteration 32/100
14/14 [==============================] - 34s 2s/step - loss: 0.4757 - binary_accuracy: 0.7765 - mae: 0.3042

Training Node 1
100/100 [==============================] - 78s 786ms/step - loss: 0.4805 - binary_accuracy: 0.7740 - mae: 0.3124 - val_loss: 0.5180 - val_binary_accuracy: 0.7550 - val_mae: 0.3271

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4802 - binary_accuracy: 0.7753 - mae: 0.3132WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 605ms/step - loss: 0.4802 - binary_accuracy: 0.7753 - mae: 0.3132 - val_loss: 0.4768 - val_binary_accuracy: 0.7753 - val_mae: 0.3082

Training Node 3
100/100 [==============================] - 80s 803ms/step - loss: 0.4760 - binary_accuracy: 0.7796 - mae: 0.3080 - val_loss: 0.5176 - val_binary_accuracy: 0.7626 - val_mae: 0.3060

Iteration 33/100
14/14 [==============================] - 33s 2s/step - loss: 0.4898 - binary_accuracy: 0.7708 - mae: 0.3051

Training Node 1
100/100 [==============================] - 76s 766ms/step - loss: 0.4779 - binary_accuracy: 0.7764 - mae: 0.3110 - val_loss: 0.5416 - val_binary_accuracy: 0.7388 - val_mae: 0.3205

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4780 - binary_accuracy: 0.7776 - mae: 0.3095WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 621ms/step - loss: 0.4780 - binary_accuracy: 0.7776 - mae: 0.3095 - val_loss: 0.4790 - val_binary_accuracy: 0.7704 - val_mae: 0.3128

Training Node 3
100/100 [==============================] - 77s 774ms/step - loss: 0.4814 - binary_accuracy: 0.7722 - mae: 0.3124 - val_loss: 0.5152 - val_binary_accuracy: 0.7566 - val_mae: 0.3100

Iteration 34/100
14/14 [==============================] - 33s 2s/step - loss: 0.4781 - binary_accuracy: 0.7717 - mae: 0.3027

Training Node 1
100/100 [==============================] - 76s 766ms/step - loss: 0.4850 - binary_accuracy: 0.7723 - mae: 0.3151 - val_loss: 0.5342 - val_binary_accuracy: 0.7529 - val_mae: 0.3180

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4788 - binary_accuracy: 0.7750 - mae: 0.3112WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 625ms/step - loss: 0.4788 - binary_accuracy: 0.7750 - mae: 0.3112 - val_loss: 0.4718 - val_binary_accuracy: 0.7767 - val_mae: 0.3054

Training Node 3
100/100 [==============================] - 78s 777ms/step - loss: 0.4827 - binary_accuracy: 0.7731 - mae: 0.3139 - val_loss: 0.4920 - val_binary_accuracy: 0.7677 - val_mae: 0.3152

Iteration 35/100
14/14 [==============================] - 33s 2s/step - loss: 0.4754 - binary_accuracy: 0.7748 - mae: 0.3083
New best validation loss: 0.47536876797676086

Training Node 1
100/100 [==============================] - 76s 759ms/step - loss: 0.4833 - binary_accuracy: 0.7698 - mae: 0.3151 - val_loss: 0.4989 - val_binary_accuracy: 0.7649 - val_mae: 0.3235

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4775 - binary_accuracy: 0.7755 - mae: 0.3100WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 65s 649ms/step - loss: 0.4775 - binary_accuracy: 0.7755 - mae: 0.3100 - val_loss: 0.4806 - val_binary_accuracy: 0.7685 - val_mae: 0.3090

Training Node 3
100/100 [==============================] - 81s 816ms/step - loss: 0.4846 - binary_accuracy: 0.7722 - mae: 0.3153 - val_loss: 0.4932 - val_binary_accuracy: 0.7586 - val_mae: 0.3142

Iteration 36/100
14/14 [==============================] - 34s 2s/step - loss: 0.4751 - binary_accuracy: 0.7740 - mae: 0.3101
New best validation loss: 0.4750751256942749

Training Node 1
100/100 [==============================] - 79s 788ms/step - loss: 0.4785 - binary_accuracy: 0.7721 - mae: 0.3118 - val_loss: 0.5122 - val_binary_accuracy: 0.7594 - val_mae: 0.3172

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4768 - binary_accuracy: 0.7743 - mae: 0.3099WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 622ms/step - loss: 0.4768 - binary_accuracy: 0.7743 - mae: 0.3099 - val_loss: 0.4786 - val_binary_accuracy: 0.7748 - val_mae: 0.3119

Training Node 3
100/100 [==============================] - 78s 779ms/step - loss: 0.4789 - binary_accuracy: 0.7746 - mae: 0.3110 - val_loss: 0.4830 - val_binary_accuracy: 0.7723 - val_mae: 0.3102

Iteration 37/100
14/14 [==============================] - 34s 2s/step - loss: 0.4783 - binary_accuracy: 0.7756 - mae: 0.3077

Training Node 1
100/100 [==============================] - 79s 797ms/step - loss: 0.4792 - binary_accuracy: 0.7765 - mae: 0.3126 - val_loss: 0.5103 - val_binary_accuracy: 0.7552 - val_mae: 0.3110

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4792 - binary_accuracy: 0.7740 - mae: 0.3117WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 612ms/step - loss: 0.4792 - binary_accuracy: 0.7740 - mae: 0.3117 - val_loss: 0.4813 - val_binary_accuracy: 0.7756 - val_mae: 0.3116

Training Node 3
100/100 [==============================] - 79s 792ms/step - loss: 0.4769 - binary_accuracy: 0.7795 - mae: 0.3097 - val_loss: 0.4927 - val_binary_accuracy: 0.7666 - val_mae: 0.3099

Iteration 38/100
14/14 [==============================] - 33s 2s/step - loss: 0.4784 - binary_accuracy: 0.7709 - mae: 0.3038

Training Node 1
100/100 [==============================] - 76s 767ms/step - loss: 0.4739 - binary_accuracy: 0.7748 - mae: 0.3071 - val_loss: 0.5055 - val_binary_accuracy: 0.7540 - val_mae: 0.3179

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4714 - binary_accuracy: 0.7781 - mae: 0.3059WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 624ms/step - loss: 0.4714 - binary_accuracy: 0.7781 - mae: 0.3059 - val_loss: 0.4781 - val_binary_accuracy: 0.7748 - val_mae: 0.3090

Training Node 3
100/100 [==============================] - 78s 782ms/step - loss: 0.4743 - binary_accuracy: 0.7774 - mae: 0.3069 - val_loss: 0.4908 - val_binary_accuracy: 0.7724 - val_mae: 0.3093

Iteration 39/100
14/14 [==============================] - 33s 2s/step - loss: 0.4733 - binary_accuracy: 0.7761 - mae: 0.3022
New best validation loss: 0.473254531621933

Training Node 1
100/100 [==============================] - 78s 783ms/step - loss: 0.4714 - binary_accuracy: 0.7798 - mae: 0.3061 - val_loss: 0.5050 - val_binary_accuracy: 0.7649 - val_mae: 0.3201

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4766 - binary_accuracy: 0.7751 - mae: 0.3110WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 631ms/step - loss: 0.4766 - binary_accuracy: 0.7751 - mae: 0.3110 - val_loss: 0.4732 - val_binary_accuracy: 0.7794 - val_mae: 0.3021

Training Node 3
100/100 [==============================] - 81s 814ms/step - loss: 0.4822 - binary_accuracy: 0.7710 - mae: 0.3136 - val_loss: 0.5003 - val_binary_accuracy: 0.7620 - val_mae: 0.3143

Iteration 40/100
14/14 [==============================] - 33s 2s/step - loss: 0.4704 - binary_accuracy: 0.7782 - mae: 0.3072
New best validation loss: 0.47040051221847534

Training Node 1
100/100 [==============================] - 77s 769ms/step - loss: 0.4755 - binary_accuracy: 0.7767 - mae: 0.3085 - val_loss: 0.5052 - val_binary_accuracy: 0.7663 - val_mae: 0.3044

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4761 - binary_accuracy: 0.7775 - mae: 0.3093WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 626ms/step - loss: 0.4761 - binary_accuracy: 0.7775 - mae: 0.3093 - val_loss: 0.4738 - val_binary_accuracy: 0.7770 - val_mae: 0.3095

Training Node 3
100/100 [==============================] - 80s 803ms/step - loss: 0.4732 - binary_accuracy: 0.7809 - mae: 0.3062 - val_loss: 0.5057 - val_binary_accuracy: 0.7633 - val_mae: 0.3167

Iteration 41/100
14/14 [==============================] - 33s 2s/step - loss: 0.4728 - binary_accuracy: 0.7777 - mae: 0.3060

Training Node 1
100/100 [==============================] - 78s 781ms/step - loss: 0.4812 - binary_accuracy: 0.7763 - mae: 0.3122 - val_loss: 0.5627 - val_binary_accuracy: 0.7457 - val_mae: 0.3111

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4772 - binary_accuracy: 0.7774 - mae: 0.3110WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 626ms/step - loss: 0.4772 - binary_accuracy: 0.7774 - mae: 0.3110 - val_loss: 0.4765 - val_binary_accuracy: 0.7755 - val_mae: 0.3135

Training Node 3
100/100 [==============================] - 79s 796ms/step - loss: 0.4713 - binary_accuracy: 0.7811 - mae: 0.3052 - val_loss: 0.6048 - val_binary_accuracy: 0.7348 - val_mae: 0.3142

Iteration 42/100
14/14 [==============================] - 33s 2s/step - loss: 0.5287 - binary_accuracy: 0.7560 - mae: 0.3029

Training Node 1
100/100 [==============================] - 78s 787ms/step - loss: 0.4784 - binary_accuracy: 0.7723 - mae: 0.3113 - val_loss: 0.4951 - val_binary_accuracy: 0.7699 - val_mae: 0.3028

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4783 - binary_accuracy: 0.7727 - mae: 0.3102WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 627ms/step - loss: 0.4783 - binary_accuracy: 0.7727 - mae: 0.3102 - val_loss: 0.4775 - val_binary_accuracy: 0.7746 - val_mae: 0.3006

Training Node 3
100/100 [==============================] - 80s 799ms/step - loss: 0.4777 - binary_accuracy: 0.7784 - mae: 0.3088 - val_loss: 0.5330 - val_binary_accuracy: 0.7544 - val_mae: 0.3189

Iteration 43/100
14/14 [==============================] - 33s 2s/step - loss: 0.4811 - binary_accuracy: 0.7748 - mae: 0.3025

Training Node 1
100/100 [==============================] - 76s 767ms/step - loss: 0.4796 - binary_accuracy: 0.7707 - mae: 0.3121 - val_loss: 0.4903 - val_binary_accuracy: 0.7661 - val_mae: 0.3153

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4783 - binary_accuracy: 0.7718 - mae: 0.3116WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 622ms/step - loss: 0.4783 - binary_accuracy: 0.7718 - mae: 0.3116 - val_loss: 0.4821 - val_binary_accuracy: 0.7746 - val_mae: 0.3086

Training Node 3
100/100 [==============================] - 78s 780ms/step - loss: 0.4773 - binary_accuracy: 0.7742 - mae: 0.3094 - val_loss: 0.5217 - val_binary_accuracy: 0.7606 - val_mae: 0.3051

Iteration 44/100
14/14 [==============================] - 34s 2s/step - loss: 0.4792 - binary_accuracy: 0.7736 - mae: 0.2998

Training Node 1
100/100 [==============================] - 79s 795ms/step - loss: 0.4738 - binary_accuracy: 0.7751 - mae: 0.3077 - val_loss: 0.4980 - val_binary_accuracy: 0.7631 - val_mae: 0.3065

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4728 - binary_accuracy: 0.7818 - mae: 0.3044WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 611ms/step - loss: 0.4728 - binary_accuracy: 0.7818 - mae: 0.3044 - val_loss: 0.4773 - val_binary_accuracy: 0.7762 - val_mae: 0.3111

Training Node 3
100/100 [==============================] - 81s 814ms/step - loss: 0.4753 - binary_accuracy: 0.7762 - mae: 0.3088 - val_loss: 0.5101 - val_binary_accuracy: 0.7655 - val_mae: 0.3195

Iteration 45/100
14/14 [==============================] - 33s 2s/step - loss: 0.4827 - binary_accuracy: 0.7744 - mae: 0.3081

Training Node 1
100/100 [==============================] - 76s 766ms/step - loss: 0.4822 - binary_accuracy: 0.7735 - mae: 0.3133 - val_loss: 0.5311 - val_binary_accuracy: 0.7543 - val_mae: 0.3311

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4722 - binary_accuracy: 0.7761 - mae: 0.3058WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 624ms/step - loss: 0.4722 - binary_accuracy: 0.7761 - mae: 0.3058 - val_loss: 0.4702 - val_binary_accuracy: 0.7780 - val_mae: 0.3083

Training Node 3
100/100 [==============================] - 79s 796ms/step - loss: 0.4711 - binary_accuracy: 0.7822 - mae: 0.3052 - val_loss: 0.4875 - val_binary_accuracy: 0.7724 - val_mae: 0.2972

Iteration 46/100
14/14 [==============================] - 33s 2s/step - loss: 0.4745 - binary_accuracy: 0.7756 - mae: 0.3021

Training Node 1
100/100 [==============================] - 77s 769ms/step - loss: 0.4747 - binary_accuracy: 0.7789 - mae: 0.3084 - val_loss: 0.4945 - val_binary_accuracy: 0.7705 - val_mae: 0.3110

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4761 - binary_accuracy: 0.7713 - mae: 0.3093WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 621ms/step - loss: 0.4761 - binary_accuracy: 0.7713 - mae: 0.3093 - val_loss: 0.4768 - val_binary_accuracy: 0.7789 - val_mae: 0.3089

Training Node 3
100/100 [==============================] - 78s 785ms/step - loss: 0.4696 - binary_accuracy: 0.7791 - mae: 0.3055 - val_loss: 0.5474 - val_binary_accuracy: 0.7456 - val_mae: 0.3113

Iteration 47/100
14/14 [==============================] - 33s 2s/step - loss: 0.4773 - binary_accuracy: 0.7751 - mae: 0.2979

Training Node 1
100/100 [==============================] - 77s 772ms/step - loss: 0.4716 - binary_accuracy: 0.7795 - mae: 0.3059 - val_loss: 0.5097 - val_binary_accuracy: 0.7628 - val_mae: 0.3083

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4763 - binary_accuracy: 0.7771 - mae: 0.3092WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 623ms/step - loss: 0.4763 - binary_accuracy: 0.7771 - mae: 0.3092 - val_loss: 0.4730 - val_binary_accuracy: 0.7768 - val_mae: 0.3039

Training Node 3
100/100 [==============================] - 78s 785ms/step - loss: 0.4641 - binary_accuracy: 0.7828 - mae: 0.3006 - val_loss: 0.4958 - val_binary_accuracy: 0.7675 - val_mae: 0.3040

Iteration 48/100
14/14 [==============================] - 33s 2s/step - loss: 0.4760 - binary_accuracy: 0.7763 - mae: 0.2970

Training Node 1
100/100 [==============================] - 77s 771ms/step - loss: 0.4757 - binary_accuracy: 0.7748 - mae: 0.3079 - val_loss: 0.4982 - val_binary_accuracy: 0.7703 - val_mae: 0.3119

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4717 - binary_accuracy: 0.7736 - mae: 0.3044WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 621ms/step - loss: 0.4717 - binary_accuracy: 0.7736 - mae: 0.3044 - val_loss: 0.4713 - val_binary_accuracy: 0.7756 - val_mae: 0.3105

Training Node 3
100/100 [==============================] - 78s 782ms/step - loss: 0.4736 - binary_accuracy: 0.7760 - mae: 0.3077 - val_loss: 0.5041 - val_binary_accuracy: 0.7648 - val_mae: 0.3018

Iteration 49/100
14/14 [==============================] - 33s 2s/step - loss: 0.4708 - binary_accuracy: 0.7784 - mae: 0.2985

Training Node 1
100/100 [==============================] - 77s 768ms/step - loss: 0.4713 - binary_accuracy: 0.7798 - mae: 0.3057 - val_loss: 0.4919 - val_binary_accuracy: 0.7703 - val_mae: 0.3073

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4761 - binary_accuracy: 0.7772 - mae: 0.3091WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 618ms/step - loss: 0.4761 - binary_accuracy: 0.7772 - mae: 0.3091 - val_loss: 0.4758 - val_binary_accuracy: 0.7770 - val_mae: 0.2978

Training Node 3
100/100 [==============================] - 79s 788ms/step - loss: 0.4696 - binary_accuracy: 0.7807 - mae: 0.3035 - val_loss: 0.5004 - val_binary_accuracy: 0.7720 - val_mae: 0.2981

Iteration 50/100
14/14 [==============================] - 34s 2s/step - loss: 0.4733 - binary_accuracy: 0.7789 - mae: 0.2944

Training Node 1
100/100 [==============================] - 77s 769ms/step - loss: 0.4721 - binary_accuracy: 0.7779 - mae: 0.3079 - val_loss: 0.4873 - val_binary_accuracy: 0.7734 - val_mae: 0.3040

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4751 - binary_accuracy: 0.7734 - mae: 0.3084WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 625ms/step - loss: 0.4751 - binary_accuracy: 0.7734 - mae: 0.3084 - val_loss: 0.4769 - val_binary_accuracy: 0.7771 - val_mae: 0.3087

Training Node 3
100/100 [==============================] - 78s 784ms/step - loss: 0.4696 - binary_accuracy: 0.7789 - mae: 0.3045 - val_loss: 0.4885 - val_binary_accuracy: 0.7696 - val_mae: 0.3073

Iteration 51/100
14/14 [==============================] - 33s 2s/step - loss: 0.4717 - binary_accuracy: 0.7784 - mae: 0.2987

Training Node 1
100/100 [==============================] - 78s 786ms/step - loss: 0.4687 - binary_accuracy: 0.7791 - mae: 0.3037 - val_loss: 0.5206 - val_binary_accuracy: 0.7588 - val_mae: 0.3414

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4724 - binary_accuracy: 0.7787 - mae: 0.3044WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 629ms/step - loss: 0.4724 - binary_accuracy: 0.7787 - mae: 0.3044 - val_loss: 0.4699 - val_binary_accuracy: 0.7770 - val_mae: 0.3098

Training Node 3
100/100 [==============================] - 81s 817ms/step - loss: 0.4742 - binary_accuracy: 0.7786 - mae: 0.3076 - val_loss: 0.4936 - val_binary_accuracy: 0.7671 - val_mae: 0.3039

Iteration 52/100
14/14 [==============================] - 33s 2s/step - loss: 0.4710 - binary_accuracy: 0.7778 - mae: 0.3025

Training Node 1
100/100 [==============================] - 77s 772ms/step - loss: 0.4705 - binary_accuracy: 0.7801 - mae: 0.3060 - val_loss: 0.4924 - val_binary_accuracy: 0.7719 - val_mae: 0.3035

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4748 - binary_accuracy: 0.7736 - mae: 0.3082WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 65s 646ms/step - loss: 0.4748 - binary_accuracy: 0.7736 - mae: 0.3082 - val_loss: 0.4763 - val_binary_accuracy: 0.7755 - val_mae: 0.3108

Training Node 3
100/100 [==============================] - 79s 795ms/step - loss: 0.4680 - binary_accuracy: 0.7767 - mae: 0.3036 - val_loss: 0.5788 - val_binary_accuracy: 0.7307 - val_mae: 0.3251

Iteration 53/100
14/14 [==============================] - 33s 2s/step - loss: 0.5085 - binary_accuracy: 0.7604 - mae: 0.3103

Training Node 1
100/100 [==============================] - 80s 804ms/step - loss: 0.4684 - binary_accuracy: 0.7815 - mae: 0.3050 - val_loss: 0.4902 - val_binary_accuracy: 0.7703 - val_mae: 0.3076

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4798 - binary_accuracy: 0.7722 - mae: 0.3120WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 625ms/step - loss: 0.4798 - binary_accuracy: 0.7722 - mae: 0.3120 - val_loss: 0.4722 - val_binary_accuracy: 0.7760 - val_mae: 0.3070

Training Node 3
100/100 [==============================] - 78s 783ms/step - loss: 0.4654 - binary_accuracy: 0.7819 - mae: 0.3006 - val_loss: 0.5072 - val_binary_accuracy: 0.7612 - val_mae: 0.3177

Iteration 54/100
14/14 [==============================] - 33s 2s/step - loss: 0.4766 - binary_accuracy: 0.7724 - mae: 0.3056

Training Node 1
100/100 [==============================] - 77s 771ms/step - loss: 0.4692 - binary_accuracy: 0.7778 - mae: 0.3043 - val_loss: 0.5339 - val_binary_accuracy: 0.7540 - val_mae: 0.3090

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4691 - binary_accuracy: 0.7795 - mae: 0.3042WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 621ms/step - loss: 0.4691 - binary_accuracy: 0.7795 - mae: 0.3042 - val_loss: 0.4779 - val_binary_accuracy: 0.7791 - val_mae: 0.3062

Training Node 3
100/100 [==============================] - 79s 793ms/step - loss: 0.4691 - binary_accuracy: 0.7802 - mae: 0.3045 - val_loss: 0.4979 - val_binary_accuracy: 0.7706 - val_mae: 0.2956

Iteration 55/100
14/14 [==============================] - 33s 2s/step - loss: 0.4733 - binary_accuracy: 0.7795 - mae: 0.2935

Training Node 1
100/100 [==============================] - 79s 792ms/step - loss: 0.4627 - binary_accuracy: 0.7818 - mae: 0.2990 - val_loss: 0.5078 - val_binary_accuracy: 0.7634 - val_mae: 0.2987

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4721 - binary_accuracy: 0.7745 - mae: 0.3048WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 625ms/step - loss: 0.4721 - binary_accuracy: 0.7745 - mae: 0.3048 - val_loss: 0.4791 - val_binary_accuracy: 0.7761 - val_mae: 0.3026

Training Node 3
100/100 [==============================] - 79s 794ms/step - loss: 0.4630 - binary_accuracy: 0.7848 - mae: 0.2982 - val_loss: 0.4837 - val_binary_accuracy: 0.7723 - val_mae: 0.3101

Iteration 56/100
14/14 [==============================] - 35s 2s/step - loss: 0.4707 - binary_accuracy: 0.7785 - mae: 0.2965

Training Node 1
100/100 [==============================] - 77s 776ms/step - loss: 0.4683 - binary_accuracy: 0.7775 - mae: 0.3051 - val_loss: 0.5130 - val_binary_accuracy: 0.7575 - val_mae: 0.3045

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4689 - binary_accuracy: 0.7795 - mae: 0.3042WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 561ms/step - loss: 0.4689 - binary_accuracy: 0.7795 - mae: 0.3042 - val_loss: 0.4703 - val_binary_accuracy: 0.7756 - val_mae: 0.3031

Training Node 3
100/100 [==============================] - 73s 728ms/step - loss: 0.4626 - binary_accuracy: 0.7836 - mae: 0.2999 - val_loss: 0.5144 - val_binary_accuracy: 0.7559 - val_mae: 0.3103

Iteration 57/100
14/14 [==============================] - 33s 2s/step - loss: 0.4836 - binary_accuracy: 0.7729 - mae: 0.3018

Training Node 1
100/100 [==============================] - 70s 698ms/step - loss: 0.4642 - binary_accuracy: 0.7815 - mae: 0.3013 - val_loss: 0.7123 - val_binary_accuracy: 0.7032 - val_mae: 0.3335

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4717 - binary_accuracy: 0.7814 - mae: 0.3059WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 601ms/step - loss: 0.4717 - binary_accuracy: 0.7814 - mae: 0.3059 - val_loss: 0.4682 - val_binary_accuracy: 0.7820 - val_mae: 0.3026

Training Node 3
100/100 [==============================] - 74s 743ms/step - loss: 0.4671 - binary_accuracy: 0.7818 - mae: 0.3025 - val_loss: 0.4883 - val_binary_accuracy: 0.7744 - val_mae: 0.3108

Iteration 58/100
14/14 [==============================] - 33s 2s/step - loss: 0.4671 - binary_accuracy: 0.7808 - mae: 0.3001
New best validation loss: 0.46708372235298157

Training Node 1
100/100 [==============================] - 69s 689ms/step - loss: 0.4716 - binary_accuracy: 0.7809 - mae: 0.3063 - val_loss: 0.4807 - val_binary_accuracy: 0.7748 - val_mae: 0.3017

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4725 - binary_accuracy: 0.7772 - mae: 0.3058WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 600ms/step - loss: 0.4725 - binary_accuracy: 0.7772 - mae: 0.3058 - val_loss: 0.4731 - val_binary_accuracy: 0.7752 - val_mae: 0.3030

Training Node 3
100/100 [==============================] - 74s 740ms/step - loss: 0.4703 - binary_accuracy: 0.7808 - mae: 0.3052 - val_loss: 0.5212 - val_binary_accuracy: 0.7472 - val_mae: 0.3067

Iteration 59/100
14/14 [==============================] - 33s 2s/step - loss: 0.4783 - binary_accuracy: 0.7691 - mae: 0.2976

Training Node 1
100/100 [==============================] - 70s 704ms/step - loss: 0.4652 - binary_accuracy: 0.7812 - mae: 0.3012 - val_loss: 0.4836 - val_binary_accuracy: 0.7755 - val_mae: 0.3037

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4690 - binary_accuracy: 0.7781 - mae: 0.3041WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 552ms/step - loss: 0.4690 - binary_accuracy: 0.7781 - mae: 0.3041 - val_loss: 0.4718 - val_binary_accuracy: 0.7779 - val_mae: 0.2968

Training Node 3
100/100 [==============================] - 73s 732ms/step - loss: 0.4599 - binary_accuracy: 0.7881 - mae: 0.2970 - val_loss: 0.5269 - val_binary_accuracy: 0.7543 - val_mae: 0.3005

Iteration 60/100
14/14 [==============================] - 33s 2s/step - loss: 0.4869 - binary_accuracy: 0.7699 - mae: 0.2931

Training Node 1
100/100 [==============================] - 71s 715ms/step - loss: 0.4686 - binary_accuracy: 0.7780 - mae: 0.3041 - val_loss: 0.4843 - val_binary_accuracy: 0.7717 - val_mae: 0.3072

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4709 - binary_accuracy: 0.7761 - mae: 0.3038WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 59s 593ms/step - loss: 0.4709 - binary_accuracy: 0.7761 - mae: 0.3038 - val_loss: 0.4698 - val_binary_accuracy: 0.7792 - val_mae: 0.3036

Training Node 3
100/100 [==============================] - 75s 749ms/step - loss: 0.4742 - binary_accuracy: 0.7738 - mae: 0.3079 - val_loss: 0.4794 - val_binary_accuracy: 0.7736 - val_mae: 0.3079

Iteration 61/100
14/14 [==============================] - 34s 2s/step - loss: 0.4673 - binary_accuracy: 0.7764 - mae: 0.3009

Training Node 1
100/100 [==============================] - 70s 697ms/step - loss: 0.4717 - binary_accuracy: 0.7764 - mae: 0.3070 - val_loss: 0.4838 - val_binary_accuracy: 0.7705 - val_mae: 0.2995

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4674 - binary_accuracy: 0.7808 - mae: 0.3042WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 555ms/step - loss: 0.4674 - binary_accuracy: 0.7808 - mae: 0.3042 - val_loss: 0.4895 - val_binary_accuracy: 0.7686 - val_mae: 0.2986

Training Node 3
100/100 [==============================] - 71s 716ms/step - loss: 0.4660 - binary_accuracy: 0.7834 - mae: 0.3016 - val_loss: 0.4961 - val_binary_accuracy: 0.7638 - val_mae: 0.3053

Iteration 62/100
14/14 [==============================] - 34s 2s/step - loss: 0.4701 - binary_accuracy: 0.7758 - mae: 0.2951

Training Node 1
100/100 [==============================] - 74s 739ms/step - loss: 0.4652 - binary_accuracy: 0.7850 - mae: 0.3007 - val_loss: 0.4908 - val_binary_accuracy: 0.7688 - val_mae: 0.3050

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4723 - binary_accuracy: 0.7763 - mae: 0.3057WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 57s 567ms/step - loss: 0.4723 - binary_accuracy: 0.7763 - mae: 0.3057 - val_loss: 0.4788 - val_binary_accuracy: 0.7718 - val_mae: 0.3050

Training Node 3
100/100 [==============================] - 72s 718ms/step - loss: 0.4680 - binary_accuracy: 0.7840 - mae: 0.3024 - val_loss: 0.5215 - val_binary_accuracy: 0.7532 - val_mae: 0.3123

Iteration 63/100
14/14 [==============================] - 33s 2s/step - loss: 0.4769 - binary_accuracy: 0.7760 - mae: 0.2993

Training Node 1
100/100 [==============================] - 69s 690ms/step - loss: 0.4637 - binary_accuracy: 0.7857 - mae: 0.3003 - val_loss: 0.4798 - val_binary_accuracy: 0.7776 - val_mae: 0.3120

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4701 - binary_accuracy: 0.7784 - mae: 0.3051WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 555ms/step - loss: 0.4701 - binary_accuracy: 0.7784 - mae: 0.3051 - val_loss: 0.4714 - val_binary_accuracy: 0.7778 - val_mae: 0.3029

Training Node 3
100/100 [==============================] - 76s 764ms/step - loss: 0.4631 - binary_accuracy: 0.7847 - mae: 0.2997 - val_loss: 0.4781 - val_binary_accuracy: 0.7741 - val_mae: 0.3123

Iteration 64/100
14/14 [==============================] - 33s 2s/step - loss: 0.4665 - binary_accuracy: 0.7786 - mae: 0.3060
New best validation loss: 0.46649834513664246

Training Node 1
100/100 [==============================] - 70s 703ms/step - loss: 0.4678 - binary_accuracy: 0.7824 - mae: 0.3039 - val_loss: 0.5072 - val_binary_accuracy: 0.7618 - val_mae: 0.3089

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4701 - binary_accuracy: 0.7786 - mae: 0.3069WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 557ms/step - loss: 0.4701 - binary_accuracy: 0.7786 - mae: 0.3069 - val_loss: 0.4713 - val_binary_accuracy: 0.7789 - val_mae: 0.3024

Training Node 3
100/100 [==============================] - 73s 729ms/step - loss: 0.4648 - binary_accuracy: 0.7855 - mae: 0.3006 - val_loss: 0.4964 - val_binary_accuracy: 0.7672 - val_mae: 0.3002

Iteration 65/100
14/14 [==============================] - 34s 2s/step - loss: 0.4811 - binary_accuracy: 0.7728 - mae: 0.2942

Training Node 1
100/100 [==============================] - 70s 698ms/step - loss: 0.4598 - binary_accuracy: 0.7856 - mae: 0.2973 - val_loss: 0.5082 - val_binary_accuracy: 0.7648 - val_mae: 0.3262

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4693 - binary_accuracy: 0.7824 - mae: 0.3021WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 59s 590ms/step - loss: 0.4693 - binary_accuracy: 0.7824 - mae: 0.3021 - val_loss: 0.4734 - val_binary_accuracy: 0.7756 - val_mae: 0.3013

Training Node 3
100/100 [==============================] - 73s 731ms/step - loss: 0.4576 - binary_accuracy: 0.7873 - mae: 0.2959 - val_loss: 0.4920 - val_binary_accuracy: 0.7694 - val_mae: 0.3047

Iteration 66/100
14/14 [==============================] - 33s 2s/step - loss: 0.4695 - binary_accuracy: 0.7782 - mae: 0.2995

Training Node 1
100/100 [==============================] - 69s 688ms/step - loss: 0.4628 - binary_accuracy: 0.7823 - mae: 0.3000 - val_loss: 0.4875 - val_binary_accuracy: 0.7691 - val_mae: 0.3135

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4702 - binary_accuracy: 0.7785 - mae: 0.3041WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 59s 588ms/step - loss: 0.4702 - binary_accuracy: 0.7785 - mae: 0.3041 - val_loss: 0.4793 - val_binary_accuracy: 0.7736 - val_mae: 0.3043

Training Node 3
100/100 [==============================] - 73s 731ms/step - loss: 0.4635 - binary_accuracy: 0.7824 - mae: 0.2995 - val_loss: 0.4806 - val_binary_accuracy: 0.7732 - val_mae: 0.3105

Iteration 67/100
14/14 [==============================] - 33s 2s/step - loss: 0.4651 - binary_accuracy: 0.7837 - mae: 0.3034
New best validation loss: 0.4650847613811493

Training Node 1
100/100 [==============================] - 70s 696ms/step - loss: 0.4594 - binary_accuracy: 0.7821 - mae: 0.2988 - val_loss: 0.5086 - val_binary_accuracy: 0.7576 - val_mae: 0.3125

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4638 - binary_accuracy: 0.7847 - mae: 0.3019WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 555ms/step - loss: 0.4638 - binary_accuracy: 0.7847 - mae: 0.3019 - val_loss: 0.4909 - val_binary_accuracy: 0.7719 - val_mae: 0.3027

Training Node 3
100/100 [==============================] - 72s 719ms/step - loss: 0.4595 - binary_accuracy: 0.7833 - mae: 0.2977 - val_loss: 0.4902 - val_binary_accuracy: 0.7692 - val_mae: 0.2956

Iteration 68/100
14/14 [==============================] - 33s 2s/step - loss: 0.4658 - binary_accuracy: 0.7808 - mae: 0.2930

Training Node 1
100/100 [==============================] - 72s 720ms/step - loss: 0.4671 - binary_accuracy: 0.7828 - mae: 0.3027 - val_loss: 0.4972 - val_binary_accuracy: 0.7684 - val_mae: 0.3004

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4706 - binary_accuracy: 0.7800 - mae: 0.3052WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 554ms/step - loss: 0.4706 - binary_accuracy: 0.7800 - mae: 0.3052 - val_loss: 0.4719 - val_binary_accuracy: 0.7787 - val_mae: 0.3080

Training Node 3
100/100 [==============================] - 74s 737ms/step - loss: 0.4669 - binary_accuracy: 0.7833 - mae: 0.3026 - val_loss: 0.4900 - val_binary_accuracy: 0.7753 - val_mae: 0.3060

Iteration 69/100
14/14 [==============================] - 33s 2s/step - loss: 0.4679 - binary_accuracy: 0.7821 - mae: 0.2990

Training Node 1
100/100 [==============================] - 72s 717ms/step - loss: 0.4635 - binary_accuracy: 0.7824 - mae: 0.3008 - val_loss: 0.4943 - val_binary_accuracy: 0.7669 - val_mae: 0.3173

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4729 - binary_accuracy: 0.7758 - mae: 0.3070WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 57s 568ms/step - loss: 0.4729 - binary_accuracy: 0.7758 - mae: 0.3070 - val_loss: 0.4726 - val_binary_accuracy: 0.7765 - val_mae: 0.2997

Training Node 3
100/100 [==============================] - 72s 719ms/step - loss: 0.4590 - binary_accuracy: 0.7841 - mae: 0.2972 - val_loss: 0.4884 - val_binary_accuracy: 0.7691 - val_mae: 0.3042

Iteration 70/100
14/14 [==============================] - 33s 2s/step - loss: 0.4662 - binary_accuracy: 0.7809 - mae: 0.2977

Training Node 1
100/100 [==============================] - 70s 701ms/step - loss: 0.4638 - binary_accuracy: 0.7820 - mae: 0.3010 - val_loss: 0.4859 - val_binary_accuracy: 0.7684 - val_mae: 0.3077

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4713 - binary_accuracy: 0.7763 - mae: 0.3050WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 563ms/step - loss: 0.4713 - binary_accuracy: 0.7763 - mae: 0.3050 - val_loss: 0.4716 - val_binary_accuracy: 0.7799 - val_mae: 0.3094

Training Node 3
100/100 [==============================] - 69s 693ms/step - loss: 0.4539 - binary_accuracy: 0.7873 - mae: 0.2928 - val_loss: 0.4751 - val_binary_accuracy: 0.7769 - val_mae: 0.3082

Iteration 71/100
14/14 [==============================] - 33s 2s/step - loss: 0.4637 - binary_accuracy: 0.7801 - mae: 0.3005
New best validation loss: 0.4636659324169159

Training Node 1
100/100 [==============================] - 76s 757ms/step - loss: 0.4610 - binary_accuracy: 0.7847 - mae: 0.2980 - val_loss: 0.4983 - val_binary_accuracy: 0.7664 - val_mae: 0.3007

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4685 - binary_accuracy: 0.7858 - mae: 0.3017WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 57s 568ms/step - loss: 0.4685 - binary_accuracy: 0.7858 - mae: 0.3017 - val_loss: 0.4768 - val_binary_accuracy: 0.7781 - val_mae: 0.3114

Training Node 3
100/100 [==============================] - 71s 713ms/step - loss: 0.4625 - binary_accuracy: 0.7859 - mae: 0.2987 - val_loss: 0.4818 - val_binary_accuracy: 0.7758 - val_mae: 0.2997

Iteration 72/100
14/14 [==============================] - 34s 2s/step - loss: 0.4656 - binary_accuracy: 0.7830 - mae: 0.2936

Training Node 1
100/100 [==============================] - 71s 708ms/step - loss: 0.4582 - binary_accuracy: 0.7869 - mae: 0.2974 - val_loss: 0.4885 - val_binary_accuracy: 0.7688 - val_mae: 0.2949

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4664 - binary_accuracy: 0.7763 - mae: 0.3025WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 58s 579ms/step - loss: 0.4664 - binary_accuracy: 0.7763 - mae: 0.3025 - val_loss: 0.4732 - val_binary_accuracy: 0.7794 - val_mae: 0.3025

Training Node 3
100/100 [==============================] - 72s 717ms/step - loss: 0.4626 - binary_accuracy: 0.7841 - mae: 0.2998 - val_loss: 0.5017 - val_binary_accuracy: 0.7730 - val_mae: 0.2907

Iteration 73/100
14/14 [==============================] - 33s 2s/step - loss: 0.4726 - binary_accuracy: 0.7796 - mae: 0.2871

Training Node 1
100/100 [==============================] - 69s 694ms/step - loss: 0.4600 - binary_accuracy: 0.7830 - mae: 0.2977 - val_loss: 0.4833 - val_binary_accuracy: 0.7768 - val_mae: 0.3011

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4690 - binary_accuracy: 0.7779 - mae: 0.3036WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 57s 567ms/step - loss: 0.4690 - binary_accuracy: 0.7779 - mae: 0.3036 - val_loss: 0.4700 - val_binary_accuracy: 0.7780 - val_mae: 0.3009

Training Node 3
100/100 [==============================] - 70s 703ms/step - loss: 0.4605 - binary_accuracy: 0.7856 - mae: 0.2984 - val_loss: 0.4846 - val_binary_accuracy: 0.7760 - val_mae: 0.2926

Iteration 74/100
14/14 [==============================] - 33s 2s/step - loss: 0.4727 - binary_accuracy: 0.7790 - mae: 0.2926

Training Node 1
100/100 [==============================] - 75s 755ms/step - loss: 0.4591 - binary_accuracy: 0.7862 - mae: 0.2982 - val_loss: 0.5057 - val_binary_accuracy: 0.7685 - val_mae: 0.3064

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4692 - binary_accuracy: 0.7772 - mae: 0.3036WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 58s 580ms/step - loss: 0.4692 - binary_accuracy: 0.7772 - mae: 0.3036 - val_loss: 0.4649 - val_binary_accuracy: 0.7799 - val_mae: 0.2994

Training Node 3
100/100 [==============================] - 74s 743ms/step - loss: 0.4602 - binary_accuracy: 0.7860 - mae: 0.2967 - val_loss: 0.4781 - val_binary_accuracy: 0.7737 - val_mae: 0.3034

Iteration 75/100
14/14 [==============================] - 34s 2s/step - loss: 0.4680 - binary_accuracy: 0.7802 - mae: 0.2993

Training Node 1
100/100 [==============================] - 74s 740ms/step - loss: 0.4589 - binary_accuracy: 0.7855 - mae: 0.2979 - val_loss: 0.5142 - val_binary_accuracy: 0.7551 - val_mae: 0.3000

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4678 - binary_accuracy: 0.7784 - mae: 0.3021WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 59s 594ms/step - loss: 0.4678 - binary_accuracy: 0.7784 - mae: 0.3021 - val_loss: 0.4782 - val_binary_accuracy: 0.7778 - val_mae: 0.3052

Training Node 3
100/100 [==============================] - 74s 743ms/step - loss: 0.4567 - binary_accuracy: 0.7900 - mae: 0.2937 - val_loss: 0.4828 - val_binary_accuracy: 0.7700 - val_mae: 0.3078

Iteration 76/100
14/14 [==============================] - 33s 2s/step - loss: 0.4680 - binary_accuracy: 0.7806 - mae: 0.2976

Training Node 1
100/100 [==============================] - 70s 704ms/step - loss: 0.4568 - binary_accuracy: 0.7842 - mae: 0.2961 - val_loss: 0.4884 - val_binary_accuracy: 0.7723 - val_mae: 0.3056

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4667 - binary_accuracy: 0.7828 - mae: 0.3032WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 615ms/step - loss: 0.4667 - binary_accuracy: 0.7828 - mae: 0.3032 - val_loss: 0.4726 - val_binary_accuracy: 0.7780 - val_mae: 0.2983

Training Node 3
100/100 [==============================] - 75s 750ms/step - loss: 0.4584 - binary_accuracy: 0.7865 - mae: 0.2950 - val_loss: 0.4982 - val_binary_accuracy: 0.7672 - val_mae: 0.3035

Iteration 77/100
14/14 [==============================] - 33s 2s/step - loss: 0.4661 - binary_accuracy: 0.7777 - mae: 0.2933

Training Node 1
100/100 [==============================] - 69s 694ms/step - loss: 0.4595 - binary_accuracy: 0.7859 - mae: 0.2972 - val_loss: 0.4904 - val_binary_accuracy: 0.7662 - val_mae: 0.3042

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4653 - binary_accuracy: 0.7827 - mae: 0.3016WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 617ms/step - loss: 0.4653 - binary_accuracy: 0.7827 - mae: 0.3016 - val_loss: 0.4707 - val_binary_accuracy: 0.7771 - val_mae: 0.3009

Training Node 3
100/100 [==============================] - 75s 750ms/step - loss: 0.4530 - binary_accuracy: 0.7896 - mae: 0.2929 - val_loss: 0.4808 - val_binary_accuracy: 0.7738 - val_mae: 0.3035

Iteration 78/100
14/14 [==============================] - 33s 2s/step - loss: 0.4626 - binary_accuracy: 0.7837 - mae: 0.2956
New best validation loss: 0.46263861656188965

Training Node 1
100/100 [==============================] - 72s 718ms/step - loss: 0.4588 - binary_accuracy: 0.7869 - mae: 0.2965 - val_loss: 0.5017 - val_binary_accuracy: 0.7770 - val_mae: 0.2887

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4641 - binary_accuracy: 0.7856 - mae: 0.2981WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 601ms/step - loss: 0.4641 - binary_accuracy: 0.7856 - mae: 0.2981 - val_loss: 0.4778 - val_binary_accuracy: 0.7763 - val_mae: 0.3049

Training Node 3
100/100 [==============================] - 74s 747ms/step - loss: 0.4578 - binary_accuracy: 0.7855 - mae: 0.2961 - val_loss: 0.4740 - val_binary_accuracy: 0.7785 - val_mae: 0.3028

Iteration 79/100
14/14 [==============================] - 33s 2s/step - loss: 0.4650 - binary_accuracy: 0.7815 - mae: 0.2958

Training Node 1
100/100 [==============================] - 69s 693ms/step - loss: 0.4532 - binary_accuracy: 0.7891 - mae: 0.2932 - val_loss: 0.4840 - val_binary_accuracy: 0.7702 - val_mae: 0.3070

Training Node 2
 62/100 [=================>............] - ETA: 18s - loss: 0.4673 - binary_accuracy: 0.7821 - mae: 0.3024WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 65s 656ms/step - loss: 0.4673 - binary_accuracy: 0.7821 - mae: 0.3024 - val_loss: 0.4762 - val_binary_accuracy: 0.7766 - val_mae: 0.2991

Training Node 3
100/100 [==============================] - 75s 753ms/step - loss: 0.4540 - binary_accuracy: 0.7870 - mae: 0.2928 - val_loss: 0.4774 - val_binary_accuracy: 0.7769 - val_mae: 0.2976

Iteration 80/100
14/14 [==============================] - 34s 2s/step - loss: 0.4631 - binary_accuracy: 0.7821 - mae: 0.2955

Training Node 1
100/100 [==============================] - 69s 695ms/step - loss: 0.4533 - binary_accuracy: 0.7902 - mae: 0.2940 - val_loss: 0.4760 - val_binary_accuracy: 0.7763 - val_mae: 0.2909

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4645 - binary_accuracy: 0.7834 - mae: 0.3001WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 603ms/step - loss: 0.4645 - binary_accuracy: 0.7834 - mae: 0.3001 - val_loss: 0.4661 - val_binary_accuracy: 0.7800 - val_mae: 0.3044

Training Node 3
100/100 [==============================] - 77s 772ms/step - loss: 0.4584 - binary_accuracy: 0.7870 - mae: 0.2951 - val_loss: 0.4885 - val_binary_accuracy: 0.7727 - val_mae: 0.3054

Iteration 81/100
14/14 [==============================] - 34s 2s/step - loss: 0.4675 - binary_accuracy: 0.7790 - mae: 0.2954

Training Node 1
100/100 [==============================] - 75s 753ms/step - loss: 0.4519 - binary_accuracy: 0.7905 - mae: 0.2917 - val_loss: 0.4855 - val_binary_accuracy: 0.7718 - val_mae: 0.3160

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4639 - binary_accuracy: 0.7852 - mae: 0.2999WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 604ms/step - loss: 0.4639 - binary_accuracy: 0.7852 - mae: 0.2999 - val_loss: 0.4738 - val_binary_accuracy: 0.7773 - val_mae: 0.2954

Training Node 3
100/100 [==============================] - 76s 760ms/step - loss: 0.4518 - binary_accuracy: 0.7871 - mae: 0.2896 - val_loss: 0.4804 - val_binary_accuracy: 0.7770 - val_mae: 0.3128

Iteration 82/100
14/14 [==============================] - 34s 2s/step - loss: 0.4642 - binary_accuracy: 0.7803 - mae: 0.3023

Training Node 1
100/100 [==============================] - 73s 727ms/step - loss: 0.4566 - binary_accuracy: 0.7868 - mae: 0.2961 - val_loss: 0.4943 - val_binary_accuracy: 0.7718 - val_mae: 0.3020

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4639 - binary_accuracy: 0.7850 - mae: 0.3013WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 628ms/step - loss: 0.4639 - binary_accuracy: 0.7850 - mae: 0.3013 - val_loss: 0.4741 - val_binary_accuracy: 0.7739 - val_mae: 0.3084

Training Node 3
100/100 [==============================] - 74s 746ms/step - loss: 0.4548 - binary_accuracy: 0.7873 - mae: 0.2939 - val_loss: 0.4796 - val_binary_accuracy: 0.7712 - val_mae: 0.3073

Iteration 83/100
14/14 [==============================] - 34s 2s/step - loss: 0.4650 - binary_accuracy: 0.7817 - mae: 0.2981

Training Node 1
100/100 [==============================] - 74s 743ms/step - loss: 0.4519 - binary_accuracy: 0.7887 - mae: 0.2937 - val_loss: 0.5038 - val_binary_accuracy: 0.7664 - val_mae: 0.3046

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4608 - binary_accuracy: 0.7810 - mae: 0.2972WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 61s 613ms/step - loss: 0.4608 - binary_accuracy: 0.7810 - mae: 0.2972 - val_loss: 0.4754 - val_binary_accuracy: 0.7795 - val_mae: 0.3012

Training Node 3
100/100 [==============================] - 75s 756ms/step - loss: 0.4554 - binary_accuracy: 0.7886 - mae: 0.2941 - val_loss: 0.4811 - val_binary_accuracy: 0.7747 - val_mae: 0.3015

Iteration 84/100
14/14 [==============================] - 33s 2s/step - loss: 0.4665 - binary_accuracy: 0.7808 - mae: 0.2948

Training Node 1
100/100 [==============================] - 70s 705ms/step - loss: 0.4580 - binary_accuracy: 0.7845 - mae: 0.2953 - val_loss: 0.5124 - val_binary_accuracy: 0.7628 - val_mae: 0.3011

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4647 - binary_accuracy: 0.7830 - mae: 0.3014WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 62s 625ms/step - loss: 0.4647 - binary_accuracy: 0.7830 - mae: 0.3014 - val_loss: 0.4864 - val_binary_accuracy: 0.7703 - val_mae: 0.3000

Training Node 3
100/100 [==============================] - 75s 756ms/step - loss: 0.4633 - binary_accuracy: 0.7838 - mae: 0.3000 - val_loss: 0.4935 - val_binary_accuracy: 0.7684 - val_mae: 0.2997

Iteration 85/100
14/14 [==============================] - 33s 2s/step - loss: 0.4674 - binary_accuracy: 0.7786 - mae: 0.2937

Training Node 1
100/100 [==============================] - 71s 715ms/step - loss: 0.4539 - binary_accuracy: 0.7898 - mae: 0.2942 - val_loss: 0.4831 - val_binary_accuracy: 0.7736 - val_mae: 0.3004

Training Node 2
 62/100 [=================>............] - ETA: 17s - loss: 0.4622 - binary_accuracy: 0.7832 - mae: 0.2974WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 64s 643ms/step - loss: 0.4622 - binary_accuracy: 0.7832 - mae: 0.2974 - val_loss: 0.4704 - val_binary_accuracy: 0.7810 - val_mae: 0.2938

Training Node 3
100/100 [==============================] - 74s 745ms/step - loss: 0.4512 - binary_accuracy: 0.7921 - mae: 0.2917 - val_loss: 0.4764 - val_binary_accuracy: 0.7760 - val_mae: 0.2984

Iteration 86/100
14/14 [==============================] - 34s 2s/step - loss: 0.4627 - binary_accuracy: 0.7828 - mae: 0.2920

Training Node 1
100/100 [==============================] - 72s 724ms/step - loss: 0.4528 - binary_accuracy: 0.7862 - mae: 0.2928 - val_loss: 0.4856 - val_binary_accuracy: 0.7730 - val_mae: 0.2885

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4626 - binary_accuracy: 0.7837 - mae: 0.2983WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 603ms/step - loss: 0.4626 - binary_accuracy: 0.7837 - mae: 0.2983 - val_loss: 0.4781 - val_binary_accuracy: 0.7773 - val_mae: 0.3071

Training Node 3
100/100 [==============================] - 76s 758ms/step - loss: 0.4508 - binary_accuracy: 0.7902 - mae: 0.2900 - val_loss: 0.4844 - val_binary_accuracy: 0.7729 - val_mae: 0.3010

Iteration 87/100
14/14 [==============================] - 33s 2s/step - loss: 0.4695 - binary_accuracy: 0.7796 - mae: 0.2933

Training Node 1
100/100 [==============================] - 72s 725ms/step - loss: 0.4557 - binary_accuracy: 0.7840 - mae: 0.2960 - val_loss: 0.4919 - val_binary_accuracy: 0.7710 - val_mae: 0.3034

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4604 - binary_accuracy: 0.7864 - mae: 0.2972WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 630ms/step - loss: 0.4604 - binary_accuracy: 0.7864 - mae: 0.2972 - val_loss: 0.4859 - val_binary_accuracy: 0.7734 - val_mae: 0.3070

Training Node 3
100/100 [==============================] - 75s 757ms/step - loss: 0.4557 - binary_accuracy: 0.7904 - mae: 0.2939 - val_loss: 0.4899 - val_binary_accuracy: 0.7691 - val_mae: 0.2991

Iteration 88/100
14/14 [==============================] - 33s 2s/step - loss: 0.4663 - binary_accuracy: 0.7811 - mae: 0.2926

Training Node 1
100/100 [==============================] - 73s 732ms/step - loss: 0.4561 - binary_accuracy: 0.7869 - mae: 0.2951 - val_loss: 0.5250 - val_binary_accuracy: 0.7562 - val_mae: 0.3091

Training Node 2
 62/100 [=================>............] - ETA: 16s - loss: 0.4652 - binary_accuracy: 0.7805 - mae: 0.2997WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 63s 628ms/step - loss: 0.4652 - binary_accuracy: 0.7805 - mae: 0.2997 - val_loss: 0.4792 - val_binary_accuracy: 0.7779 - val_mae: 0.3022

Training Node 3
100/100 [==============================] - 74s 746ms/step - loss: 0.4452 - binary_accuracy: 0.7966 - mae: 0.2861 - val_loss: 0.5162 - val_binary_accuracy: 0.7602 - val_mae: 0.3060

Iteration 89/100
14/14 [==============================] - 33s 2s/step - loss: 0.4653 - binary_accuracy: 0.7809 - mae: 0.2963

Training Node 1
100/100 [==============================] - 70s 704ms/step - loss: 0.4513 - binary_accuracy: 0.7899 - mae: 0.2921 - val_loss: 0.4823 - val_binary_accuracy: 0.7738 - val_mae: 0.2990

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4618 - binary_accuracy: 0.7830 - mae: 0.2983WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 596ms/step - loss: 0.4618 - binary_accuracy: 0.7830 - mae: 0.2983 - val_loss: 0.4708 - val_binary_accuracy: 0.7779 - val_mae: 0.3049

Training Node 3
100/100 [==============================] - 78s 778ms/step - loss: 0.4506 - binary_accuracy: 0.7896 - mae: 0.2901 - val_loss: 0.5031 - val_binary_accuracy: 0.7601 - val_mae: 0.3129

Iteration 90/100
14/14 [==============================] - 33s 2s/step - loss: 0.4688 - binary_accuracy: 0.7792 - mae: 0.2994

Training Node 1
100/100 [==============================] - 68s 686ms/step - loss: 0.4543 - binary_accuracy: 0.7877 - mae: 0.2937 - val_loss: 0.5175 - val_binary_accuracy: 0.7615 - val_mae: 0.3115

Training Node 2
 62/100 [=================>............] - ETA: 15s - loss: 0.4609 - binary_accuracy: 0.7848 - mae: 0.2967WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 60s 602ms/step - loss: 0.4609 - binary_accuracy: 0.7848 - mae: 0.2967 - val_loss: 0.4760 - val_binary_accuracy: 0.7772 - val_mae: 0.3122

Training Node 3
100/100 [==============================] - 74s 740ms/step - loss: 0.4550 - binary_accuracy: 0.7870 - mae: 0.2931 - val_loss: 0.4841 - val_binary_accuracy: 0.7745 - val_mae: 0.3013

Iteration 91/100
14/14 [==============================] - 33s 2s/step - loss: 0.4642 - binary_accuracy: 0.7820 - mae: 0.2957

Training Node 1
100/100 [==============================] - 70s 706ms/step - loss: 0.4537 - binary_accuracy: 0.7881 - mae: 0.2943 - val_loss: 0.4970 - val_binary_accuracy: 0.7674 - val_mae: 0.3033

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4626 - binary_accuracy: 0.7857 - mae: 0.2993WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 554ms/step - loss: 0.4626 - binary_accuracy: 0.7857 - mae: 0.2993 - val_loss: 0.4743 - val_binary_accuracy: 0.7763 - val_mae: 0.2916

Training Node 3
100/100 [==============================] - 72s 722ms/step - loss: 0.4507 - binary_accuracy: 0.7891 - mae: 0.2903 - val_loss: 0.4839 - val_binary_accuracy: 0.7715 - val_mae: 0.2972

Iteration 92/100
14/14 [==============================] - 33s 2s/step - loss: 0.4644 - binary_accuracy: 0.7822 - mae: 0.2924

Training Node 1
100/100 [==============================] - 69s 696ms/step - loss: 0.4509 - binary_accuracy: 0.7888 - mae: 0.2909 - val_loss: 0.4958 - val_binary_accuracy: 0.7678 - val_mae: 0.3054

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4625 - binary_accuracy: 0.7839 - mae: 0.2993WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 55s 555ms/step - loss: 0.4625 - binary_accuracy: 0.7839 - mae: 0.2993 - val_loss: 0.4817 - val_binary_accuracy: 0.7725 - val_mae: 0.3048

Training Node 3
100/100 [==============================] - 74s 745ms/step - loss: 0.4493 - binary_accuracy: 0.7900 - mae: 0.2899 - val_loss: 0.4977 - val_binary_accuracy: 0.7669 - val_mae: 0.3126

Iteration 93/100
14/14 [==============================] - 33s 2s/step - loss: 0.4659 - binary_accuracy: 0.7804 - mae: 0.3009

Training Node 1
100/100 [==============================] - 70s 700ms/step - loss: 0.4475 - binary_accuracy: 0.7921 - mae: 0.2895 - val_loss: 0.4743 - val_binary_accuracy: 0.7786 - val_mae: 0.2981

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4658 - binary_accuracy: 0.7798 - mae: 0.3025WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 57s 572ms/step - loss: 0.4658 - binary_accuracy: 0.7798 - mae: 0.3025 - val_loss: 0.4802 - val_binary_accuracy: 0.7752 - val_mae: 0.2942

Training Node 3
100/100 [==============================] - 73s 735ms/step - loss: 0.4531 - binary_accuracy: 0.7921 - mae: 0.2927 - val_loss: 0.4876 - val_binary_accuracy: 0.7646 - val_mae: 0.3000

Iteration 94/100
14/14 [==============================] - 33s 2s/step - loss: 0.4608 - binary_accuracy: 0.7830 - mae: 0.2922
New best validation loss: 0.4607756733894348

Training Node 1
100/100 [==============================] - 72s 718ms/step - loss: 0.4481 - binary_accuracy: 0.7924 - mae: 0.2893 - val_loss: 0.4884 - val_binary_accuracy: 0.7739 - val_mae: 0.2994

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4615 - binary_accuracy: 0.7818 - mae: 0.2980WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 59s 593ms/step - loss: 0.4615 - binary_accuracy: 0.7818 - mae: 0.2980 - val_loss: 0.5132 - val_binary_accuracy: 0.7644 - val_mae: 0.3002

Training Node 3
100/100 [==============================] - 71s 714ms/step - loss: 0.4489 - binary_accuracy: 0.7932 - mae: 0.2887 - val_loss: 0.4855 - val_binary_accuracy: 0.7729 - val_mae: 0.2885

Iteration 95/100
14/14 [==============================] - 34s 2s/step - loss: 0.4710 - binary_accuracy: 0.7799 - mae: 0.2873

Training Node 1
100/100 [==============================] - 69s 691ms/step - loss: 0.4484 - binary_accuracy: 0.7912 - mae: 0.2891 - val_loss: 0.4968 - val_binary_accuracy: 0.7684 - val_mae: 0.3047

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4618 - binary_accuracy: 0.7819 - mae: 0.2971WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 58s 585ms/step - loss: 0.4618 - binary_accuracy: 0.7819 - mae: 0.2971 - val_loss: 0.4831 - val_binary_accuracy: 0.7732 - val_mae: 0.3067

Training Node 3
100/100 [==============================] - 77s 771ms/step - loss: 0.4530 - binary_accuracy: 0.7927 - mae: 0.2923 - val_loss: 0.4840 - val_binary_accuracy: 0.7766 - val_mae: 0.2849

Iteration 96/100
14/14 [==============================] - 33s 2s/step - loss: 0.4642 - binary_accuracy: 0.7828 - mae: 0.2855

Training Node 1
100/100 [==============================] - 69s 691ms/step - loss: 0.4443 - binary_accuracy: 0.7930 - mae: 0.2886 - val_loss: 0.4763 - val_binary_accuracy: 0.7797 - val_mae: 0.2906

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4611 - binary_accuracy: 0.7814 - mae: 0.2983WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 559ms/step - loss: 0.4611 - binary_accuracy: 0.7814 - mae: 0.2983 - val_loss: 0.4873 - val_binary_accuracy: 0.7727 - val_mae: 0.2979

Training Node 3
100/100 [==============================] - 70s 703ms/step - loss: 0.4477 - binary_accuracy: 0.7937 - mae: 0.2891 - val_loss: 0.4807 - val_binary_accuracy: 0.7780 - val_mae: 0.2901

Iteration 97/100
14/14 [==============================] - 33s 2s/step - loss: 0.4624 - binary_accuracy: 0.7839 - mae: 0.2852

Training Node 1
100/100 [==============================] - 71s 712ms/step - loss: 0.4452 - binary_accuracy: 0.7923 - mae: 0.2876 - val_loss: 0.5611 - val_binary_accuracy: 0.7484 - val_mae: 0.3062

Training Node 2
 62/100 [=================>............] - ETA: 13s - loss: 0.4633 - binary_accuracy: 0.7849 - mae: 0.2974WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 562ms/step - loss: 0.4633 - binary_accuracy: 0.7849 - mae: 0.2974 - val_loss: 0.4882 - val_binary_accuracy: 0.7667 - val_mae: 0.3034

Training Node 3
100/100 [==============================] - 72s 724ms/step - loss: 0.4447 - binary_accuracy: 0.7973 - mae: 0.2842 - val_loss: 0.4783 - val_binary_accuracy: 0.7758 - val_mae: 0.3056

Iteration 98/100
14/14 [==============================] - 34s 2s/step - loss: 0.4723 - binary_accuracy: 0.7807 - mae: 0.2969

Training Node 1
100/100 [==============================] - 69s 690ms/step - loss: 0.4453 - binary_accuracy: 0.7898 - mae: 0.2885 - val_loss: 0.4763 - val_binary_accuracy: 0.7771 - val_mae: 0.2961

Training Node 2
 62/100 [=================>............] - ETA: 14s - loss: 0.4625 - binary_accuracy: 0.7815 - mae: 0.2988WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 58s 583ms/step - loss: 0.4625 - binary_accuracy: 0.7815 - mae: 0.2988 - val_loss: 0.4809 - val_binary_accuracy: 0.7727 - val_mae: 0.3102

Training Node 3
100/100 [==============================] - 73s 734ms/step - loss: 0.4448 - binary_accuracy: 0.7954 - mae: 0.2860 - val_loss: 0.4871 - val_binary_accuracy: 0.7710 - val_mae: 0.3001

Iteration 99/100
14/14 [==============================] - 33s 2s/step - loss: 0.4625 - binary_accuracy: 0.7843 - mae: 0.2923

Training Node 1
100/100 [==============================] - 69s 694ms/step - loss: 0.4475 - binary_accuracy: 0.7938 - mae: 0.2873 - val_loss: 0.4759 - val_binary_accuracy: 0.7752 - val_mae: 0.3010

Training Node 2
 62/100 [=================>............] - ETA: 12s - loss: 0.4610 - binary_accuracy: 0.7832 - mae: 0.2967WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 100 batches). You may need to use the repeat() function when building your dataset.
100/100 [==============================] - 56s 558ms/step - loss: 0.4610 - binary_accuracy: 0.7832 - mae: 0.2967 - val_loss: 0.4747 - val_binary_accuracy: 0.7768 - val_mae: 0.3107

Training Node 3
100/100 [==============================] - 72s 720ms/step - loss: 0.4456 - binary_accuracy: 0.7922 - mae: 0.2883 - val_loss: 0.4786 - val_binary_accuracy: 0.7770 - val_mae: 0.2946

Iteration 100/100
14/14 [==============================] - 33s 2s/step - loss: 0.4634 - binary_accuracy: 0.7801 - mae: 0.2943
32/32 [==============================] - 2s 67ms/step
No description has been provided for this image
InĀ [89]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

# Function to compute bootstrap confidence intervals for AUC
def bootstrap_auc(y_true, y_scores, n_bootstraps=1000, alpha=0.95):
    bootstrapped_aucs = []
    n_samples = len(y_true)

    for _ in range(n_bootstraps):
        # Resample with replacement
        indices = np.random.choice(np.arange(n_samples), size=n_samples, replace=True)
        if len(np.unique(y_true[indices])) < 2:  # Ensure we have both classes
            continue
        fpr, tpr, _ = roc_curve(y_true[indices], y_scores[indices])
        roc_auc = auc(fpr, tpr)
        bootstrapped_aucs.append(roc_auc)

    lower_bound = np.percentile(bootstrapped_aucs, (1 - alpha) / 2 * 100)
    upper_bound = np.percentile(bootstrapped_aucs, (1 + alpha) / 2 * 100)

    return np.mean(bootstrapped_aucs), lower_bound, upper_bound

# Initialize AUC lists for each model and their CIs
node1_auc = []
node2_auc = []
node3_auc = []
swarm_auc = []  # For the merged model

node1_ci = []
node2_ci = []
node3_ci = []
swarm_ci = []

# List of models and their predictions
models = [node1_model, node2_model, node3_model, merged_model]
predictions = [pred_Y1, pred_Y2, pred_Y3, pred_Y]
auc_vectors = [node1_auc, node2_auc, node3_auc, swarm_auc]
ci_vectors = [node1_ci, node2_ci, node3_ci, swarm_ci]

# Loop through each model and calculate AUC for each class
for model_idx, (model, pred_Y) in enumerate(zip(models, predictions)):
    for idx, c_label in enumerate(all_labels):
        # Compute ROC curve and AUC
        fpr, tpr, thresholds = roc_curve(test_Y[:, idx].astype(int), pred_Y[:, idx])
        roc_auc = auc(fpr, tpr)
        
        # Store the AUC in the corresponding vector
        auc_vectors[model_idx].append(roc_auc)
        
        # Calculate bootstrap confidence intervals
        mean_auc, lower_ci, upper_ci = bootstrap_auc(test_Y[:, idx].astype(int), pred_Y[:, idx])
        ci_vectors[model_idx].append((lower_ci, upper_ci))

# Convert lists to numpy arrays for easier handling if needed
node1_auc = np.array(node1_auc)
node2_auc = np.array(node2_auc)
node3_auc = np.array(node3_auc)
swarm_auc = np.array(swarm_auc)

# Convert CIs to numpy arrays for easier handling if needed
node1_ci = np.array(node1_ci)
node2_ci = np.array(node2_ci)
node3_ci = np.array(node3_ci)
swarm_ci = np.array(swarm_ci)

# Print AUC results and confidence intervals for each model
print("Node 1 AUCs:", node1_auc)
print("Node 1 CIs:", node1_ci)
print("Node 2 AUCs:", node2_auc)
print("Node 2 CIs:", node2_ci)
print("Node 3 AUCs:", node3_auc)
print("Node 3 CIs:", node3_ci)
print("Swarm AUCs:", swarm_auc)
print("Swarm CIs:", swarm_ci)
Node 1 AUCs: [0.6709933  0.78021017 0.61867496 0.67109827]
Node 1 CIs: [[0.63435783 0.70795539]
 [0.74694334 0.8106894 ]
 [0.57294136 0.66409326]
 [0.63451435 0.70468165]]
Node 2 AUCs: [0.5        0.51329338 0.49656559 0.5       ]
Node 2 CIs: [[0.5        0.5       ]
 [0.4956993  0.529479  ]
 [0.48624648 0.505131  ]
 [0.5        0.5       ]]
Node 3 AUCs: [0.66868255 0.78267966 0.61121907 0.68273014]
Node 3 CIs: [[0.63024724 0.70562155]
 [0.75159669 0.8143354 ]
 [0.57120531 0.65342623]
 [0.64867497 0.71792728]]
Swarm AUCs: [0.77944338 0.87535439 0.71844307 0.73093353]
Swarm CIs: [[0.74578813 0.81325313]
 [0.84827684 0.89796918]
 [0.68063146 0.75492717]
 [0.69810308 0.76173492]]
InĀ [90]:
# Create a list of dictionaries for each node's AUC values and conditions
data = []

# Define nodes and their corresponding AUC lists and CI lists
nodes = ['Node 1', 'Node 2', 'Node 3', 'Swarm']
auc_lists = [node1_auc, node2_auc, node3_auc, swarm_auc]
ci_lists = [node1_ci, node2_ci, node3_ci, swarm_ci]

# Populate the data list with AUC values, lower CI, upper CI, and their corresponding nodes and conditions
for node, auc_values, ci_values in zip(nodes, auc_lists, ci_lists):
    for condition, auc, ci in zip(all_labels, auc_values, ci_values):
        data.append({
            'AUC': auc, 
            'Lower CI': ci[0], 
            'Upper CI': ci[1], 
            'Node': node, 
            'Condition': condition
        })

# Create a DataFrame from the list of dictionaries
result = pd.DataFrame(data)

# Create a DataFrame from the list of dictionaries
result = pd.DataFrame(data)
# Print result DataFrame to verify
result
Out[90]:
AUC Lower CI Upper CI Node Condition
0 0.670993 0.634358 0.707955 Node 1 Atelectasis
1 0.780210 0.746943 0.810689 Node 1 Effusion
2 0.618675 0.572941 0.664093 Node 1 Infiltration
3 0.671098 0.634514 0.704682 Node 1 No Finding
4 0.500000 0.500000 0.500000 Node 2 Atelectasis
5 0.513293 0.495699 0.529479 Node 2 Effusion
6 0.496566 0.486246 0.505131 Node 2 Infiltration
7 0.500000 0.500000 0.500000 Node 2 No Finding
8 0.668683 0.630247 0.705622 Node 3 Atelectasis
9 0.782680 0.751597 0.814335 Node 3 Effusion
10 0.611219 0.571205 0.653426 Node 3 Infiltration
11 0.682730 0.648675 0.717927 Node 3 No Finding
12 0.779443 0.745788 0.813253 Swarm Atelectasis
13 0.875354 0.848277 0.897969 Swarm Effusion
14 0.718443 0.680631 0.754927 Swarm Infiltration
15 0.730934 0.698103 0.761735 Swarm No Finding
InĀ [91]:
import matplotlib.pyplot as plt
import seaborn as sns

# Create the plot
fig, axes = plt.subplots(1, len(all_labels), figsize=(20, 6), sharey=True)
fig.suptitle('AUC Scores Across Nodes and Conditions with Confidence Intervals', fontsize=16)

for ax, condition in zip(axes, all_labels):
    # Filter data for the current condition
    condition_data = result[result['Condition'] == condition]
    
    # Plot individual points for AUC values
    sns.stripplot(x='Node', y='AUC', data=condition_data, ax=ax, color='black', size=6, jitter=True)

    # Add error bars for confidence intervals
    for i in range(len(condition_data)):
        ax.errorbar(x=i, y=condition_data['AUC'].iloc[i],
                    yerr=[[condition_data['AUC'].iloc[i] - condition_data['Lower CI'].iloc[i]], 
                           [condition_data['Upper CI'].iloc[i] - condition_data['AUC'].iloc[i]]],
                    fmt='o', color='red', capsize=5)

    ax.set_title(condition)
    ax.set_ylim(0.4, 1.05)  # Adjust based on your AUC range
    ax.set_xlabel('')
    
    if ax == axes[0]:
        ax.set_ylabel('AUC')
    else:
        ax.set_ylabel('')
    
    # Rotate x-axis labels
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')

# Adjust layout
plt.tight_layout()
plt.subplots_adjust(top=0.9)

# Show plot
plt.show()
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/3150646550.py:32: UserWarning: set_ticklabels() should only be used with a fixed number of ticks, i.e. after set_ticks() or using a FixedLocator.
  ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/3150646550.py:32: UserWarning: set_ticklabels() should only be used with a fixed number of ticks, i.e. after set_ticks() or using a FixedLocator.
  ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/3150646550.py:32: UserWarning: set_ticklabels() should only be used with a fixed number of ticks, i.e. after set_ticks() or using a FixedLocator.
  ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
/var/folders/w3/gtm29qhx0wj4wg_y2_jw07hw0000gn/T/ipykernel_76436/3150646550.py:32: UserWarning: set_ticklabels() should only be used with a fixed number of ticks, i.e. after set_ticks() or using a FixedLocator.
  ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
No description has been provided for this image